From a9fd9ece9b9436b6103d084920c6897ef1adbae6 Mon Sep 17 00:00:00 2001
From: jiangheng
Date: Sat, 3 Sep 2022 20:35:04 +0800
Subject: [PATCH] backport some patches from community

---
 ...-nxdomain-redirect-assertion-failure.patch |  123 ++
 ...for-nxdomain-redirect-ncachenxdomain.patch |  331 ++
 ...ne_lock-is-locked-before-unlocking-i.patch |   70 ++
 ...-crash-on-dst-initialization-failure.patch |   32 +
 ...-require-non-zero-length-public-keys.patch |   57 +
 ...-that-saltlen-is-consistent-with-the.patch |   31 +
 ...ERR-in-fromwire-if-bits-are-non-zero.patch |   33 +
 ...l-rcode-to-dns_ttl_t-when-setting-ex.patch |   42 +
 ...CT_POKE_CLOSE-as-it-triggers-a-tsan-.patch |  100 ++
 ...-updating-reading-manager-epoll_even.patch |  124 ++
 ...nership-of-aclp-before-calling-destr.patch |   31 +
 ...nership-of-validatorp-before-calling.patch |   36 +
 ...rt-0012-Address-lock-order-inversion.patch |  153 +++
 ...you-can-t-change-what-you-are-pollin.patch |   69 ++
 ...was-read-without-the-lock-being-held.patch |   49 +
 ...-Missing-locks-in-ns_lwresd_shutdown.patch |   46 +
 ...-0016-Use-atomics-to-update-counters.patch |  107 ++
 ...Obtain-a-lock-on-the-quota-structure.patch |   48 +
 ...The-node-lock-was-released-too-early.patch |   69 ++
 ...er-inversion-between-the-keytable-an.patch |  108 ++
 ...-to-release-rwlock-to-prevent-lock-o.patch |   88 ++
 ...er-reversals-when-shutting-down-a-vi.patch |   45 +
 ...lock-when-calling-deref_portentry-as.patch |  157 +++
 ...one-before-calling-zone_namerd_tostr.patch |   69 ++
 ...or-between-dns_rbt_findnode-and-subt.patch |  163 +++
 ...e-in-dns_stats_detach-over-reference.patch |   57 +
 ...ck-check-of-DNS_ZONEFLG_EXITING-flag.patch |   56 +
 ...ort-0027-Fix-locking-for-LMDB-0.9.26.patch |  313 +++++
 ...-LOC-records-with-non-integer-negati.patch |   48 +
 ...needs-to-hold-a-reference-to-its-tas.patch |   45 +
 ...ss-to-flags-in-dns__zone_loadpending.patch |   46 +
 ...nt-atomically-to-silence-tsan-errors.patch |   45 +
 ...iled-to-disable-EDNS-as-a-side-effec.patch |  257 ++++
 ...ion-on-obtaining-a-headlock-as-it-tr.patch |   51 +
 ...4-Address-tsan-error-in-view-destroy.patch |   30 +
 ...tx-blocked-as-it-is-updated-by-multi.patch |   62 +
 ...ata-if-we-care-about-whether-data-is.patch |  100 ++
 ...-linked-while-holding-the-queue-lock.patch |   67 ++
 ...-data-race-in-dns_adbentry_overquota.patch |   98 ++
 ...rt-0039-Address-lock-order-inversion.patch |   75 ++
 ...nding-going-to-zero-while-kicking-th.patch |   33 +
 ...-data-races-between-socket-bitfields.patch |   95 ++
 ...ster_indent-and-dns_master_indentstr.patch |  172 +++
 ...d-of-zl-server-and-zl-reconfig-until.patch |   41 +
 ...-0044-Use-a-reference-counter-for-zt.patch |  219 ++++
 ...-to-release-rwlock-to-prevent-lock-o.patch |  119 ++
 ...-to-release-rwlock-to-prevent-lock-o.patch |   91 ++
 ...-to-release-rwlock-to-prevent-lock-o.patch |   80 ++
 ...-ealier-to-prevent-lock-order-invers.patch |   75 ++
 ...-control-symtab-to-prevent-data-race.patch |  165 +++
 ...rt-0050-Address-lock-order-inversion.patch |  106 ++
 ...rder-loop-by-sending-TAT-in-an-event.patch |  283 +++++
 ...CHENXRRSET-in-fetch_callback_-dnskey.patch |   55 +
 ...-refs-when-atomics-are-not-available.patch |   68 ++
 ...054-Inactive-incorrectly-incremented.patch |   39 +
 ...e-TSAN-data-race-in-zone_maintenance.patch |   55 +
 ...es-when-gss_accept_sec_context-fails.patch |   34 +
 ...-if-a-transfer-breaks-its-SOA-record.patch |   46 +
 ...nsistencies-in-checking-added-RRsets.patch |   34 +
 ...ct-should-reject-rdata-with-DNS_RDAT.patch |   42 +
 ...nt-atomically-to-silence-tsan-errors.patch |  103 ++
 ...essage_t-for-using-attach-detach-sem.patch | 1059 +++++++++++++++++
 ...ns-message-state-in-resolver-s-logic.patch |  979 +++++++++++++++
 ...ling-dns_message_t-shared-references.patch |  837 +++++++++++++
 backport-CVE-2021-25220.patch                 |    2 +-
 bind.spec                                     |  157 ++-
 66 files changed, 8511 insertions(+), 9 deletions(-)
 create mode 100644 backport-0000-Fix-nxdomain-redirect-assertion-failure.patch
 create mode 100644 backport-0001-Add-test-for-nxdomain-redirect-ncachenxdomain.patch
 create mode 100644 backport-0002-make-sure-new_zone_lock-is-locked-before-unlocking-i.patch
 create mode 100644 backport-0003-Prevent-crash-on-dst-initialization-failure.patch
 create mode 100644 backport-0004-IPSECKEY-require-non-zero-length-public-keys.patch
 create mode 100644 backport-0005-NSEC3PARAM-check-that-saltlen-is-consistent-with-the.patch
 create mode 100644 backport-0006-A6-return-FORMERR-in-fromwire-if-bits-are-non-zero.patch
 create mode 100644 backport-0007-Cast-the-original-rcode-to-dns_ttl_t-when-setting-ex.patch
 create mode 100644 backport-0008-Lock-on-msg-SELECT_POKE_CLOSE-as-it-triggers-a-tsan-.patch
 create mode 100644 backport-0009-Lock-access-when-updating-reading-manager-epoll_even.patch
 create mode 100644 backport-0010-Take-complete-ownership-of-aclp-before-calling-destr.patch
 create mode 100644 backport-0011-Take-complete-ownership-of-validatorp-before-calling.patch
 create mode 100644 backport-0012-Address-lock-order-inversion.patch
 create mode 100644 backport-0013-It-appears-that-you-can-t-change-what-you-are-pollin.patch
 create mode 100644 backport-0014-counter-used-was-read-without-the-lock-being-held.patch
 create mode 100644 backport-0015-Missing-locks-in-ns_lwresd_shutdown.patch
 create mode 100644 backport-0016-Use-atomics-to-update-counters.patch
 create mode 100644 backport-0017-Obtain-a-lock-on-the-quota-structure.patch
 create mode 100644 backport-0018-The-node-lock-was-released-too-early.patch
 create mode 100644 backport-0019-Address-lock-order-inversion-between-the-keytable-an.patch
 create mode 100644 backport-0020-Pause-dbiterator-to-release-rwlock-to-prevent-lock-o.patch
 create mode 100644 backport-0021-Address-lock-order-reversals-when-shutting-down-a-vi.patch
 create mode 100644 backport-0022-Hold-qid-lock-when-calling-deref_portentry-as.patch
 create mode 100644 backport-0023-Lock-zone-before-calling-zone_namerd_tostr.patch
 create mode 100644 backport-0024-Address-TSAN-error-between-dns_rbt_findnode-and-subt.patch
 create mode 100644 backport-0025-Address-data-race-in-dns_stats_detach-over-reference.patch
 create mode 100644 backport-0026-Lock-check-of-DNS_ZONEFLG_EXITING-flag.patch
 create mode 100644 backport-0027-Fix-locking-for-LMDB-0.9.26.patch
 create mode 100644 backport-0028-Correctly-encode-LOC-records-with-non-integer-negati.patch
 create mode 100644 backport-0029-isc_ratelimiter-needs-to-hold-a-reference-to-its-tas.patch
 create mode 100644 backport-0030-Lock-access-to-flags-in-dns__zone_loadpending.patch
 create mode 100644 backport-0031-Update-init_count-atomically-to-silence-tsan-errors.patch
 create mode 100644 backport-0032-dig-bufsize-0-failed-to-disable-EDNS-as-a-side-effec.patch
 create mode 100644 backport-0033-Remove-optimisation-on-obtaining-a-headlock-as-it-tr.patch
 create mode 100644 backport-0034-Address-tsan-error-in-view-destroy.patch
 create mode 100644 backport-0035-Lock-access-to-ctx-blocked-as-it-is-updated-by-multi.patch
 create mode 100644 backport-0036-Only-test-node-data-if-we-care-about-whether-data-is.patch
 create mode 100644 backport-0037-Test-if-linked-while-holding-the-queue-lock.patch
 create mode 100644 backport-0038-Address-data-race-in-dns_adbentry_overquota.patch
 create mode 100644 backport-0039-Address-lock-order-inversion.patch
 create mode 100644 backport-0040-Prevent-loads_pending-going-to-zero-while-kicking-th.patch
 create mode 100644 backport-0041-Address-data-races-between-socket-bitfields.patch
 create mode 100644 backport-0042-Only-read-dns_master_indent-and-dns_master_indentstr.patch
 create mode 100644 backport-0043-Defer-read-of-zl-server-and-zl-reconfig-until.patch
 create mode 100644 backport-0044-Use-a-reference-counter-for-zt.patch
 create mode 100644 backport-0045-Pause-dbiterator-to-release-rwlock-to-prevent-lock-o.patch
 create mode 100644 backport-0046-Pause-dbiterator-to-release-rwlock-to-prevent-lock-o.patch
 create mode 100644 backport-0047-Pause-dbiterator-to-release-rwlock-to-prevent-lock-o.patch
 create mode 100644 backport-0048-Pause-dbiterator-ealier-to-prevent-lock-order-invers.patch
 create mode 100644 backport-0049-Lock-access-to-control-symtab-to-prevent-data-race.patch
 create mode 100644 backport-0050-Address-lock-order-inversion.patch
 create mode 100644 backport-0051-Break-lock-order-loop-by-sending-TAT-in-an-event.patch
 create mode 100644 backport-0052-Handle-DNS_R_NCACHENXRRSET-in-fetch_callback_-dnskey.patch
 create mode 100644 backport-0053-Lock-read-of-refs-when-atomics-are-not-available.patch
 create mode 100644 backport-0054-Inactive-incorrectly-incremented.patch
 create mode 100644 backport-0055-Resolve-TSAN-data-race-in-zone_maintenance.patch
 create mode 100644 backport-0056-Free-resources-when-gss_accept_sec_context-fails.patch
 create mode 100644 backport-0057-Unload-a-zone-if-a-transfer-breaks-its-SOA-record.patch
 create mode 100644 backport-0058-Address-inconsistencies-in-checking-added-RRsets.patch
 create mode 100644 backport-0059-dns_rdata_tostruct-should-reject-rdata-with-DNS_RDAT.patch
 create mode 100644 backport-0060-Update-init_count-atomically-to-silence-tsan-errors.patch
 create mode 100644 backport-0061-Refactored-dns_message_t-for-using-attach-detach-sem.patch
 create mode 100644 backport-0062-Fix-invalid-dns-message-state-in-resolver-s-logic.patch
 create mode 100644 backport-0063-Properly-handling-dns_message_t-shared-references.patch

diff --git a/backport-0000-Fix-nxdomain-redirect-assertion-failure.patch b/backport-0000-Fix-nxdomain-redirect-assertion-failure.patch
new file mode 100644
index 0000000..873ba8c
--- /dev/null
+++ b/backport-0000-Fix-nxdomain-redirect-assertion-failure.patch
@@ -0,0 +1,123 @@
+From f3d3703fe38d8dee6bd86349a8fb8b30749d8b49 Mon Sep 17 00:00:00 2001
+From: Matthijs Mekking
+Date: Tue, 26 Feb 2019 15:55:29 +0100
+Subject: [PATCH] Fix nxdomain-redirect assertion failure
+
+- Always set is_zonep in query_getdb; previously it was only set if
+  result was ISC_R_SUCCESS or ISC_R_NOTFOUND.
+- Don't reset is_zone for redirect.
+- Style cleanup.
+ +(cherry picked from commit a85cc641d7a4c66cbde03cc4e31edc038a24df46) +(cherry picked from commit 486a201149ab7523e3b3089fc84f20d7f1a07a24) +Conflict: mv lib/ns/query.c to bin/named/query.c +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/f3d3703fe38d8dee6bd86349a8fb8b30749d8b49 +--- + bin/named/query.c | 32 +++++++++++++++++++------------- + 1 file changed, 19 insertions(+), 13 deletions(-) +diff --git a/bin/named/query.c b/bin/named/query.c +index 33e6d59..eebbd6d 100644 +--- a/bin/named/query.c ++++ b/bin/named/query.c +@@ -1260,7 +1260,6 @@ query_getdb(ns_client_t *client, dns_name_t *name, dns_rdatatype_t qtype, + dns_dbversion_t **versionp, bool *is_zonep) + { + isc_result_t result; +- + isc_result_t tresult; + unsigned int namelabels; + unsigned int zonelabels; +@@ -1277,8 +1276,9 @@ query_getdb(ns_client_t *client, dns_name_t *name, dns_rdatatype_t qtype, + dbp, versionp); + + /* See how many labels are in the zone's name. */ +- if (result == ISC_R_SUCCESS && zone != NULL) ++ if (result == ISC_R_SUCCESS && zone != NULL) { + zonelabels = dns_name_countlabels(dns_zone_getorigin(zone)); ++ } + + /* + * If # zone labels < # name labels, try to find an even better match +@@ -1345,8 +1345,11 @@ query_getdb(ns_client_t *client, dns_name_t *name, dns_rdatatype_t qtype, + * If neither attempt above succeeded, return the cache instead + */ + *is_zonep = true; +- } else if (result == ISC_R_NOTFOUND) { +- result = query_getcachedb(client, name, qtype, dbp, options); ++ } else { ++ if (result == ISC_R_NOTFOUND) { ++ result = query_getcachedb(client, name, qtype, dbp, ++ options); ++ } + *is_zonep = false; + } + return (result); +@@ -6667,11 +6670,13 @@ redirect2(ns_client_t *client, dns_name_t *name, dns_rdataset_t *rdataset, + + CTRACE(ISC_LOG_DEBUG(3), "redirect2"); + +- if (client->view->redirectzone == NULL) ++ if (client->view->redirectzone == NULL) { + return (ISC_R_NOTFOUND); ++ } + +- if (dns_name_issubdomain(name, client->view->redirectzone)) ++ if (dns_name_issubdomain(name, client->view->redirectzone)) { + return (ISC_R_NOTFOUND); ++ } + + found = dns_fixedname_initname(&fixed); + dns_rdataset_init(&trdataset); +@@ -6679,8 +6684,9 @@ redirect2(ns_client_t *client, dns_name_t *name, dns_rdataset_t *rdataset, + dns_clientinfomethods_init(&cm, ns_client_sourceip); + dns_clientinfo_init(&ci, client, NULL); + +- if (WANTDNSSEC(client) && dns_db_iszone(*dbp) && dns_db_issecure(*dbp)) ++ if (WANTDNSSEC(client) && dns_db_iszone(*dbp) && dns_db_issecure(*dbp)) { + return (ISC_R_NOTFOUND); ++ } + + if (WANTDNSSEC(client) && dns_rdataset_isassociated(rdataset)) { + if (rdataset->trust == dns_trust_secure) +@@ -6717,16 +6723,19 @@ redirect2(ns_client_t *client, dns_name_t *name, dns_rdataset_t *rdataset, + redirectname, NULL); + if (result != ISC_R_SUCCESS) + return (ISC_R_NOTFOUND); +- } else ++ } else { + dns_name_copy(redirectname, client->view->redirectzone, NULL); ++ } + + options = 0; + result = query_getdb(client, redirectname, qtype, options, &zone, + &db, &version, &is_zone); +- if (result != ISC_R_SUCCESS) ++ if (result != ISC_R_SUCCESS) { + return (ISC_R_NOTFOUND); +- if (zone != NULL) ++ } ++ if (zone != NULL) { + dns_zone_detach(&zone); ++ } + + /* + * Lookup the requested data in the redirect zone. 
+@@ -6996,7 +7005,6 @@ query_find(ns_client_t *client, dns_fetchevent_t *event, dns_rdatatype_t qtype) + RESTORE(node, client->query.redirect.node); + RESTORE(zone, client->query.redirect.zone); + authoritative = client->query.redirect.authoritative; +- is_zone = client->query.redirect.is_zone; + + /* + * Free resources used while recursing. +@@ -7093,7 +7101,6 @@ query_find(ns_client_t *client, dns_fetchevent_t *event, dns_rdatatype_t qtype) + free_devent(client, ISC_EVENT_PTR(&event), &event); + } else if (REDIRECT(client)) { + result = client->query.redirect.result; +- is_zone = client->query.redirect.is_zone; + } else { + result = event->result; + } +-- +2.23.0 + diff --git a/backport-0001-Add-test-for-nxdomain-redirect-ncachenxdomain.patch b/backport-0001-Add-test-for-nxdomain-redirect-ncachenxdomain.patch new file mode 100644 index 0000000..e7bcb3b --- /dev/null +++ b/backport-0001-Add-test-for-nxdomain-redirect-ncachenxdomain.patch @@ -0,0 +1,331 @@ +From 2fbadaeec617a5ac7b33eabfeb1eb95a1c1711c9 Mon Sep 17 00:00:00 2001 +From: Matthijs Mekking +Date: Tue, 26 Feb 2019 15:38:18 +0100 +Subject: [PATCH] Add test for nxdomain-redirect ncachenxdomain + +(cherry picked from commit 2d65626630c19bb8159a025accb18e5179da5dc3) +(cherry picked from commit 05d29443eb422748eec0e359f03474bbb983d28c) +Conflict: delete util/copyrights +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/2fbadaeec617a5ac7b33eabfeb1eb95a1c1711c9 +--- + bin/tests/system/redirect/clean.sh | 5 +++ + bin/tests/system/redirect/ns1/root.db | 2 +- + bin/tests/system/redirect/ns4/named.conf.in | 3 +- + bin/tests/system/redirect/ns5/named.conf.in | 30 ++++++++++++++ + bin/tests/system/redirect/ns5/root.db.in | 16 ++++++++ + bin/tests/system/redirect/ns5/sign.sh | 43 +++++++++++++++++++++ + bin/tests/system/redirect/ns5/signed.db.in | 18 +++++++++ + bin/tests/system/redirect/ns5/unsigned.db | 18 +++++++++ + bin/tests/system/redirect/ns6/named.conf.in | 30 ++++++++++++++ + bin/tests/system/redirect/ns6/root.db | 16 ++++++++ + bin/tests/system/redirect/setup.sh | 3 ++ + bin/tests/system/redirect/tests.sh | 16 ++++++++ + 13 files changed, 198 insertions(+), 3 deletions(-) + create mode 100644 bin/tests/system/redirect/ns5/named.conf.in + create mode 100644 bin/tests/system/redirect/ns5/root.db.in + create mode 100644 bin/tests/system/redirect/ns5/sign.sh + create mode 100644 bin/tests/system/redirect/ns5/signed.db.in + create mode 100644 bin/tests/system/redirect/ns5/unsigned.db + create mode 100644 bin/tests/system/redirect/ns6/named.conf.in + create mode 100644 bin/tests/system/redirect/ns6/root.db + +diff --git a/bin/tests/system/redirect/clean.sh b/bin/tests/system/redirect/clean.sh +index 27a65d2a5e..b8bba0d107 100644 +--- a/bin/tests/system/redirect/clean.sh ++++ b/bin/tests/system/redirect/clean.sh +@@ -27,4 +27,9 @@ rm -f ns3/dsset-signed. + rm -f ns3/nsec3.db* + rm -f ns3/signed.db* + rm -f ns4/*.db ++rm -f ns5/dsset-* ++rm -f ns5/K* ns5/sign.ns5.* ++rm -f ns5/root.db ns5/root.db.signed ++rm -f ns5/signed.db ns5/signed.db.signed ++rm -f ns6/signed.db.signed + rm -f rndc.out +diff --git a/bin/tests/system/redirect/ns1/root.db b/bin/tests/system/redirect/ns1/root.db +index 532063c05c..7b8caea29a 100644 +--- a/bin/tests/system/redirect/ns1/root.db ++++ b/bin/tests/system/redirect/ns1/root.db +@@ -11,7 +11,7 @@ $TTL 3600 + @ SOA a.root-servers.nil. marka.isc.org. 0 0 0 0 0 + @ NS a.root-servers.nil. + a.root-servers.nil. A 10.53.0.1 +-example NS ns1.example. ++example NS ns1.example. + ns1.example. 
A 10.53.0.1 + signed NS ns1.example. + ns1.signed. A 10.53.0.1 +diff --git a/bin/tests/system/redirect/ns4/named.conf.in b/bin/tests/system/redirect/ns4/named.conf.in +index 8e9a0afd19..c6003441fa 100644 +--- a/bin/tests/system/redirect/ns4/named.conf.in ++++ b/bin/tests/system/redirect/ns4/named.conf.in +@@ -16,7 +16,7 @@ controls { /* empty */ }; + acl rfc1918 { 10/8; 192.168/16; 172.16/12; }; + + options { +- query-source address 10.53.0.2; /* note this is not 10.53.0.3 */ ++ query-source address 10.53.0.2; /* note this is not 10.53.0.4 */ + notify-source 10.53.0.4; + transfer-source 10.53.0.4; + port @PORT@; +diff --git a/bin/tests/system/redirect/ns5/named.conf.in b/bin/tests/system/redirect/ns5/named.conf.in +new file mode 100644 +index 0000000000..e06deb02a9 +--- /dev/null ++++ b/bin/tests/system/redirect/ns5/named.conf.in +@@ -0,0 +1,30 @@ ++/* ++ * Copyright (C) Internet Systems Consortium, Inc. ("ISC") ++ * ++ * This Source Code Form is subject to the terms of the Mozilla Public ++ * License, v. 2.0. If a copy of the MPL was not distributed with this ++ * file, You can obtain one at http://mozilla.org/MPL/2.0/. ++ * ++ * See the COPYRIGHT file distributed with this work for additional ++ * information regarding copyright ownership. ++ */ ++ ++// NS5 ++ ++options { ++ port @PORT@; ++ listen-on port @PORT@ { 10.53.0.5; }; ++ pid-file "named.pid"; ++ nxdomain-redirect signed; ++}; ++ ++zone "." { ++ type master; ++ file "root.db.signed"; ++}; ++ ++// An unsigned zone that ns6 has a delegation for. ++zone "unsigned." { ++ type master; ++ file "unsigned.db"; ++}; +diff --git a/bin/tests/system/redirect/ns5/root.db.in b/bin/tests/system/redirect/ns5/root.db.in +new file mode 100644 +index 0000000000..90c634706b +--- /dev/null ++++ b/bin/tests/system/redirect/ns5/root.db.in +@@ -0,0 +1,16 @@ ++; Copyright (C) Internet Systems Consortium, Inc. ("ISC") ++; ++; This Source Code Form is subject to the terms of the Mozilla Public ++; License, v. 2.0. If a copy of the MPL was not distributed with this ++; file, You can obtain one at http://mozilla.org/MPL/2.0/. ++; ++; See the COPYRIGHT file distributed with this work for additional ++; information regarding copyright ownership. ++ ++. 86400 IN SOA a.root-servers.nil. hostmaster.example.net. 2019022100 1800 900 604800 86400 ++. 518400 IN NS a.root-servers.nil. ++a.root-servers.nil. 518400 IN A 10.53.0.5 ++signed. 172800 IN NS ns.signed. ++ns.signed. 172800 IN A 10.53.0.6 ++unsigned. 172800 IN NS ns.unsigned. ++ns.unsigned. 172800 IN A 10.53.0.5 +diff --git a/bin/tests/system/redirect/ns5/sign.sh b/bin/tests/system/redirect/ns5/sign.sh +new file mode 100644 +index 0000000000..e26904a6b2 +--- /dev/null ++++ b/bin/tests/system/redirect/ns5/sign.sh +@@ -0,0 +1,43 @@ ++#!/bin/sh -e ++# ++# Copyright (C) Internet Systems Consortium, Inc. ("ISC") ++# ++# This Source Code Form is subject to the terms of the Mozilla Public ++# License, v. 2.0. If a copy of the MPL was not distributed with this ++# file, You can obtain one at http://mozilla.org/MPL/2.0/. ++# ++# See the COPYRIGHT file distributed with this work for additional ++# information regarding copyright ownership. ++ ++SYSTEMTESTTOP=../.. ++. $SYSTEMTESTTOP/conf.sh ++ ++# We sign the zone here and move the signed zone to ns6. ++# The ns5 server actually does not serve this zone but ++# the DS and NS records are in the test root zone, and ++# delegate to ns6. ++zone=signed. 
++infile=signed.db.in ++zonefile=signed.db ++ ++key1=`$KEYGEN -q -a $DEFAULT_ALGORITHM -b $DEFAULT_BITS $zone 2> /dev/null` ++key2=`$KEYGEN -q -a $DEFAULT_ALGORITHM -b $DEFAULT_BITS -fk $zone 2> /dev/null` ++ ++cat $infile $key1.key $key2.key > $zonefile ++ ++$SIGNER -P -g -O full -o $zone $zonefile > sign.ns5.signed.out 2>&1 ++ ++cp signed.db.signed ../ns6 ++ ++# Root zone. ++zone=. ++infile=root.db.in ++zonefile=root.db ++ ++key1=`$KEYGEN -q -a $DEFAULT_ALGORITHM -b $DEFAULT_BITS $zone 2> /dev/null` ++key2=`$KEYGEN -q -a $DEFAULT_ALGORITHM -b $DEFAULT_BITS -fk $zone 2> /dev/null` ++ ++# cat $infile $key1.key $key2.key > $zonefile ++cat $infile dsset-signed. $key1.key $key2.key > $zonefile ++ ++$SIGNER -P -g -O full -o $zone $zonefile > sign.ns5.root.out 2>&1 +diff --git a/bin/tests/system/redirect/ns5/signed.db.in b/bin/tests/system/redirect/ns5/signed.db.in +new file mode 100644 +index 0000000000..8884120b6c +--- /dev/null ++++ b/bin/tests/system/redirect/ns5/signed.db.in +@@ -0,0 +1,18 @@ ++; Copyright (C) Internet Systems Consortium, Inc. ("ISC") ++; ++; This Source Code Form is subject to the terms of the Mozilla Public ++; License, v. 2.0. If a copy of the MPL was not distributed with this ++; file, You can obtain one at http://mozilla.org/MPL/2.0/. ++; ++; See the COPYRIGHT file distributed with this work for additional ++; information regarding copyright ownership. ++ ++$TTL 300 ++@ IN SOA ns.signed. hostmaster.signed. 0 0 0 0 0 ++@ IN NS ns.signed. ++ ++ns.signed. IN A 10.0.53.6 ++domain.signed. IN A 10.0.53.1 ++ ++* IN A 100.100.100.1 ++* IN AAAA 2001:ffff:ffff::100.100.100.1 +diff --git a/bin/tests/system/redirect/ns5/unsigned.db b/bin/tests/system/redirect/ns5/unsigned.db +new file mode 100644 +index 0000000000..0f0604d79e +--- /dev/null ++++ b/bin/tests/system/redirect/ns5/unsigned.db +@@ -0,0 +1,18 @@ ++; Copyright (C) Internet Systems Consortium, Inc. ("ISC") ++; ++; This Source Code Form is subject to the terms of the Mozilla Public ++; License, v. 2.0. If a copy of the MPL was not distributed with this ++; file, You can obtain one at http://mozilla.org/MPL/2.0/. ++; ++; See the COPYRIGHT file distributed with this work for additional ++; information regarding copyright ownership. ++ ++$TTL 300 ++@ IN SOA ns.unsigned. hostmaster.unsigned. 0 0 0 0 0 ++@ IN NS ns.unsigned. ++ ++ns.unsigned. IN A 10.53.0.6 ++domain.unsigned. IN A 10.0.53.1 ++ ++* IN A 100.100.100.1 ++* IN AAAA 2001:ffff:ffff::100.100.100.1 +diff --git a/bin/tests/system/redirect/ns6/named.conf.in b/bin/tests/system/redirect/ns6/named.conf.in +new file mode 100644 +index 0000000000..bca355c3ea +--- /dev/null ++++ b/bin/tests/system/redirect/ns6/named.conf.in +@@ -0,0 +1,30 @@ ++/* ++ * Copyright (C) Internet Systems Consortium, Inc. ("ISC") ++ * ++ * This Source Code Form is subject to the terms of the Mozilla Public ++ * License, v. 2.0. If a copy of the MPL was not distributed with this ++ * file, You can obtain one at http://mozilla.org/MPL/2.0/. ++ * ++ * See the COPYRIGHT file distributed with this work for additional ++ * information regarding copyright ownership. ++ */ ++ ++// NS6 ++ ++options { ++ port @PORT@; ++ listen-on port @PORT@ { 10.53.0.6; }; ++ pid-file "named.pid"; ++ nxdomain-redirect unsigned; ++}; ++ ++zone "." { ++ type master; ++ file "root.db"; ++}; ++ ++// A signed zone that ns5 has a delegation for. ++zone "signed." 
{ ++ type master; ++ file "signed.db.signed"; ++}; +diff --git a/bin/tests/system/redirect/ns6/root.db b/bin/tests/system/redirect/ns6/root.db +new file mode 100644 +index 0000000000..5e78d23ea2 +--- /dev/null ++++ b/bin/tests/system/redirect/ns6/root.db +@@ -0,0 +1,16 @@ ++; Copyright (C) Internet Systems Consortium, Inc. ("ISC") ++; ++; This Source Code Form is subject to the terms of the Mozilla Public ++; License, v. 2.0. If a copy of the MPL was not distributed with this ++; file, You can obtain one at http://mozilla.org/MPL/2.0/. ++; ++; See the COPYRIGHT file distributed with this work for additional ++; information regarding copyright ownership. ++ ++. 86400 IN SOA a.root-servers.nil. hostmaster.example.net. 2019022100 1800 900 604800 86400 ++. 518400 IN NS a.root-servers.nil. ++a.root-servers.nil. 518400 IN A 10.53.0.6 ++signed. 172800 IN NS ns.signed. ++ns.signed. 172800 IN A 10.53.0.6 ++unsigned. 172800 IN NS ns.unsigned. ++ns.unsigned. 172800 IN A 10.53.0.5 +diff --git a/bin/tests/system/redirect/setup.sh b/bin/tests/system/redirect/setup.sh +index c5400205f2..cad235bd43 100644 +--- a/bin/tests/system/redirect/setup.sh ++++ b/bin/tests/system/redirect/setup.sh +@@ -18,6 +18,8 @@ copy_setports ns1/named.conf.in ns1/named.conf + copy_setports ns2/named.conf.in ns2/named.conf + copy_setports ns3/named.conf.in ns3/named.conf + copy_setports ns4/named.conf.in ns4/named.conf ++copy_setports ns5/named.conf.in ns5/named.conf ++copy_setports ns6/named.conf.in ns6/named.conf + + cp ns2/redirect.db.in ns2/redirect.db + cp ns2/example.db.in ns2/example.db +@@ -25,3 +27,4 @@ cp ns2/example.db.in ns2/example.db + + cp ns4/example.db.in ns4/example.db + ( cd ns3 && $SHELL sign.sh ) ++( cd ns5 && $SHELL sign.sh ) +diff --git a/bin/tests/system/redirect/tests.sh b/bin/tests/system/redirect/tests.sh +index 9eb6b42462..810e43e8d8 100644 +--- a/bin/tests/system/redirect/tests.sh ++++ b/bin/tests/system/redirect/tests.sh +@@ -517,5 +517,21 @@ n=`expr $n + 1` + if [ $ret != 0 ]; then echo_i "failed"; fi + status=`expr $status + $ret` + ++echo_i "checking tld nxdomain-redirect against signed root zone ($n)" ++ret=0 ++$DIG $DIGOPTS @10.53.0.5 asdfasdfasdf > dig.out.ns5.test$n || ret=1 ++grep "status: NXDOMAIN" dig.out.ns5.test$n > /dev/null || ret=1 ++n=`expr $n + 1` ++if [ $ret != 0 ]; then echo_i "failed"; fi ++status=`expr $status + $ret` ++ ++echo_i "checking tld nxdomain-redirect against unsigned root zone ($n)" ++ret=0 ++$DIG $DIGOPTS @10.53.0.6 asdfasdfasdf > dig.out.ns6.test$n || ret=1 ++grep "status: NXDOMAIN" dig.out.ns6.test$n > /dev/null || ret=1 ++n=`expr $n + 1` ++if [ $ret != 0 ]; then echo_i "failed"; fi ++status=`expr $status + $ret` ++ + echo_i "exit status: $status" + [ $status -eq 0 ] || exit 1 +-- +2.23.0 + diff --git a/backport-0002-make-sure-new_zone_lock-is-locked-before-unlocking-i.patch b/backport-0002-make-sure-new_zone_lock-is-locked-before-unlocking-i.patch new file mode 100644 index 0000000..a73fc98 --- /dev/null +++ b/backport-0002-make-sure-new_zone_lock-is-locked-before-unlocking-i.patch @@ -0,0 +1,70 @@ +From 0f7f6201e31a9da68d57290b5f4ed31e8248b972 Mon Sep 17 00:00:00 2001 +From: Evan Hunt +Date: Sat, 11 Jul 2020 21:30:53 -0700 +Subject: [PATCH] make sure new_zone_lock is locked before unlocking it + +it was possible for the count_newzones() function to try to +unlock view->new_zone_lock on return before locking it, which +caused a crash on shutdown. 
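The change described above moves the LOCK() ahead of the first step that can jump to the cleanup label, so every path that reaches the label already holds new_zone_lock before the matching UNLOCK(). A minimal standalone sketch of that ordering, using plain pthreads rather than BIND's actual server.c code (the function and the failing step are illustrative, not BIND APIs):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t new_zone_lock = PTHREAD_MUTEX_INITIALIZER;

static int
might_fail(void) {
	return (-1);	/* pretend the first step fails */
}

static int
count_newzones_sketch(int *countp) {
	int result;

	pthread_mutex_lock(&new_zone_lock);	/* lock first ... */

	result = might_fail();			/* ... then run the steps */
	if (result != 0) {			/* that can bail out early */
		goto cleanup;
	}
	*countp = 0;

 cleanup:
	/* The unlock is now always balanced by the lock taken above. */
	pthread_mutex_unlock(&new_zone_lock);
	return (result);
}

int
main(void) {
	int n = 0;

	printf("result=%d\n", count_newzones_sketch(&n));
	return (0);
}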
+ +(cherry picked from commit ed37c63e2bbc4afe299dbe38ae98871dcc1d3470) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/0f7f6201e31a9da68d57290b5f4ed31e8248b972 +--- + bin/named/server.c | 12 ++++++------ + 1 file changed, 6 insertions(+), 6 deletions(-) + +diff --git a/bin/named/server.c b/bin/named/server.c +index 4df714e527..e752aa71a9 100644 +--- a/bin/named/server.c ++++ b/bin/named/server.c +@@ -6789,6 +6789,8 @@ count_newzones(dns_view_t *view, ns_cfgctx_t *nzcfg, int *num_zonesp) { + + REQUIRE(num_zonesp != NULL); + ++ LOCK(&view->new_zone_lock); ++ + CHECK(migrate_nzf(view)); + + isc_log_write(ns_g_lctx, +@@ -6797,8 +6799,6 @@ count_newzones(dns_view_t *view, ns_cfgctx_t *nzcfg, int *num_zonesp) { + "for view '%s'", + view->new_zone_db, view->name); + +- LOCK(&view->new_zone_lock); +- + CHECK(nzd_count(view, &n)); + + *num_zonesp = n; +@@ -12006,6 +12006,10 @@ nzd_count(dns_view_t *view, int *countp) { + return (result); + } + ++/* ++ * Migrate zone configuration from an NZF file to an NZD database. ++ * Caller must hold view->new_zone_lock. ++ */ + static isc_result_t + migrate_nzf(dns_view_t *view) { + isc_result_t result; +@@ -12021,8 +12025,6 @@ migrate_nzf(dns_view_t *view) { + MDB_val key, data; + ns_dzarg_t dzarg; + +- LOCK(&view->new_zone_lock); +- + /* + * If NZF file doesn't exist, or NZD DB exists and already + * has data, return without attempting migration. +@@ -12166,8 +12168,6 @@ migrate_nzf(dns_view_t *view) { + result = nzd_close(&txn, commit); + } + +- UNLOCK(&view->new_zone_lock); +- + if (text != NULL) { + isc_buffer_free(&text); + } +-- +2.23.0 + diff --git a/backport-0003-Prevent-crash-on-dst-initialization-failure.patch b/backport-0003-Prevent-crash-on-dst-initialization-failure.patch new file mode 100644 index 0000000..86e0645 --- /dev/null +++ b/backport-0003-Prevent-crash-on-dst-initialization-failure.patch @@ -0,0 +1,32 @@ +From 35fbfaa4981333286437f26557db26863d4c5299 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= +Date: Wed, 22 Jul 2020 18:55:02 +0200 +Subject: [PATCH] Prevent crash on dst initialization failure + +server might be created, but not yet fully initialized, when fatal +function is called. Check both server and task before attaching +exclusive task. 
+ +(cherry picked from commit c5e7152cf04f75d0fe00163f076f4cc3cafce259) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/35fbfaa4981333286437f26557db26863d4c5299 +--- + bin/named/server.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/bin/named/server.c b/bin/named/server.c +index e752aa71a9..9739206ae7 100644 +--- a/bin/named/server.c ++++ b/bin/named/server.c +@@ -9299,7 +9299,7 @@ ns_server_destroy(ns_server_t **serverp) { + + static void + fatal(ns_server_t *server, const char *msg, isc_result_t result) { +- if (server != NULL) { ++ if (server != NULL && server->task != NULL) { + /* + * Prevent races between the OpenSSL on_exit registered + * function and any other OpenSSL calls from other tasks +-- +2.23.0 + diff --git a/backport-0004-IPSECKEY-require-non-zero-length-public-keys.patch b/backport-0004-IPSECKEY-require-non-zero-length-public-keys.patch new file mode 100644 index 0000000..6e25fe0 --- /dev/null +++ b/backport-0004-IPSECKEY-require-non-zero-length-public-keys.patch @@ -0,0 +1,57 @@ +From 73341adc34493738c94681baabe05f3038610147 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Thu, 13 Aug 2020 13:08:17 +1000 +Subject: [PATCH] IPSECKEY: require non-zero length public keys + +(cherry picked from commit d7f701480341f33cfbad3bfff9ee3876859e0ce2) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/73341adc34493738c94681baabe05f3038610147 +--- + lib/dns/rdata/generic/ipseckey_45.c | 14 +++++++++++--- + 1 file changed, 11 insertions(+), 3 deletions(-) + +diff --git a/lib/dns/rdata/generic/ipseckey_45.c b/lib/dns/rdata/generic/ipseckey_45.c +index d85f79576f..d986cc956e 100644 +--- a/lib/dns/rdata/generic/ipseckey_45.c ++++ b/lib/dns/rdata/generic/ipseckey_45.c +@@ -217,18 +217,23 @@ fromwire_ipseckey(ARGS_FROMWIRE) { + + switch (region.base[1]) { + case 0: ++ if (region.length < 4) { ++ return (ISC_R_UNEXPECTEDEND); ++ } + isc_buffer_forward(source, region.length); + return (mem_tobuffer(target, region.base, region.length)); + + case 1: +- if (region.length < 7) ++ if (region.length < 8) { + return (ISC_R_UNEXPECTEDEND); ++ } + isc_buffer_forward(source, region.length); + return (mem_tobuffer(target, region.base, region.length)); + + case 2: +- if (region.length < 19) ++ if (region.length < 20) { + return (ISC_R_UNEXPECTEDEND); ++ } + isc_buffer_forward(source, region.length); + return (mem_tobuffer(target, region.base, region.length)); + +@@ -238,7 +243,10 @@ fromwire_ipseckey(ARGS_FROMWIRE) { + RETERR(dns_name_fromwire(&name, source, dctx, options, target)); + isc_buffer_activeregion(source, ®ion); + isc_buffer_forward(source, region.length); +- return(mem_tobuffer(target, region.base, region.length)); ++ if (region.length < 1) { ++ return (ISC_R_UNEXPECTEDEND); ++ } ++ return (mem_tobuffer(target, region.base, region.length)); + + default: + return (ISC_R_NOTIMPLEMENTED); +-- +2.23.0 + diff --git a/backport-0005-NSEC3PARAM-check-that-saltlen-is-consistent-with-the.patch b/backport-0005-NSEC3PARAM-check-that-saltlen-is-consistent-with-the.patch new file mode 100644 index 0000000..8443afc --- /dev/null +++ b/backport-0005-NSEC3PARAM-check-that-saltlen-is-consistent-with-the.patch @@ -0,0 +1,31 @@ +From 712d3a50df589b6937d8b3537967cf16352bb31b Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Thu, 13 Aug 2020 13:21:46 +1000 +Subject: [PATCH] NSEC3PARAM: check that saltlen is consistent with the rdata + length + +(cherry picked from commit 7dc8e720ff1360837fc8c0649445bcaa2b1236d4) +Conflict: NA 
+Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/712d3a50df589b6937d8b3537967cf16352bb31b +--- + lib/dns/rdata/generic/nsec3param_51.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/lib/dns/rdata/generic/nsec3param_51.c b/lib/dns/rdata/generic/nsec3param_51.c +index 6140276b54..c515e33c0f 100644 +--- a/lib/dns/rdata/generic/nsec3param_51.c ++++ b/lib/dns/rdata/generic/nsec3param_51.c +@@ -156,8 +156,9 @@ fromwire_nsec3param(ARGS_FROMWIRE) { + saltlen = sr.base[4]; + isc_region_consume(&sr, 5); + +- if (sr.length < saltlen) ++ if (sr.length != saltlen) { + RETERR(DNS_R_FORMERR); ++ } + isc_region_consume(&sr, saltlen); + RETERR(mem_tobuffer(target, rr.base, rr.length)); + isc_buffer_forward(source, rr.length); +-- +2.23.0 + diff --git a/backport-0006-A6-return-FORMERR-in-fromwire-if-bits-are-non-zero.patch b/backport-0006-A6-return-FORMERR-in-fromwire-if-bits-are-non-zero.patch new file mode 100644 index 0000000..11c7963 --- /dev/null +++ b/backport-0006-A6-return-FORMERR-in-fromwire-if-bits-are-non-zero.patch @@ -0,0 +1,33 @@ +From 4236972de4c025d5aa1317f782c3df411457b79a Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Sat, 15 Aug 2020 10:12:50 +1000 +Subject: [PATCH] A6: return FORMERR in fromwire if bits are non zero. + +oss_fuzz: Issue 24864: bind9:dns_rdata_fromwire_text_fuzzer: Overwrites-const-input in dns_rdata_fromwire_text_fuzzer + +(cherry picked from commit 8452404bd7facb89790c32bab508f55bf8d37cea) +(cherry picked from commit d8a57d32b19942eea923eecfa1bdd863b3286835) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/4236972de4c025d5aa1317f782c3df411457b79a +--- + lib/dns/rdata/in_1/a6_38.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/lib/dns/rdata/in_1/a6_38.c b/lib/dns/rdata/in_1/a6_38.c +index 1cdbe62d68..e69fc6fbc8 100644 +--- a/lib/dns/rdata/in_1/a6_38.c ++++ b/lib/dns/rdata/in_1/a6_38.c +@@ -173,7 +173,9 @@ fromwire_in_a6(ARGS_FROMWIRE) { + if (sr.length < octets) + return (ISC_R_UNEXPECTEDEND); + mask = 0xff >> (prefixlen % 8); +- sr.base[0] &= mask; /* Ensure pad bits are zero. */ ++ if ((sr.base[0] & ~mask) != 0) { ++ return (DNS_R_FORMERR); ++ } + RETERR(mem_tobuffer(target, sr.base, octets)); + isc_buffer_forward(source, octets); + } +-- +2.23.0 + diff --git a/backport-0007-Cast-the-original-rcode-to-dns_ttl_t-when-setting-ex.patch b/backport-0007-Cast-the-original-rcode-to-dns_ttl_t-when-setting-ex.patch new file mode 100644 index 0000000..ad26570 --- /dev/null +++ b/backport-0007-Cast-the-original-rcode-to-dns_ttl_t-when-setting-ex.patch @@ -0,0 +1,42 @@ +From b1146514451d57dff844dbfa2c6767d79acb8b7f Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Tue, 25 Aug 2020 22:59:35 +1000 +Subject: [PATCH] Cast the original rcode to (dns_ttl_t) when setting extended + rcode + +Shifting (signed) integer left could trigger undefined behaviour when +the shifted value would overflow into the sign bit (e.g. 2048). 
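As a quick illustration of why the cast matters, here is a minimal standalone program (not part of the patch series) that performs the same shift on an unsigned type; dns_ttl_t is assumed to be a 32-bit unsigned integer, and the mask value is an assumed stand-in for DNS_MESSAGE_EDNSRCODE_MASK:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t dns_ttl_t;		/* assumed 32-bit unsigned, as in BIND */

#define EDNSRCODE_MASK 0xff000000U	/* assumed value; stands in for
					 * DNS_MESSAGE_EDNSRCODE_MASK */

int
main(void) {
	unsigned int rcode = 2048;	/* extended rcode from the UBSAN report */
	dns_ttl_t ttl = 0;

	/*
	 * Casting to an unsigned type before shifting keeps the operation
	 * well defined; with a plain int, 2048 << 20 lands in the sign bit.
	 */
	ttl &= ~EDNSRCODE_MASK;
	ttl |= (((dns_ttl_t)rcode << 20) & EDNSRCODE_MASK);

	printf("edns ttl = 0x%08" PRIx32 "\n", ttl);
	return (0);
}

Replacing the cast with a plain signed shift and building with -fsanitize=undefined should reproduce the "left shift of 2048 by 20 places" report quoted below.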
+ +The issue was found when using AFL++ and UBSAN: + + message.c:2274:33: runtime error: left shift of 2048 by 20 places cannot be represented in type 'int' + SUMMARY: UndefinedBehaviorSanitizer: undefined-behavior message.c:2274:33 in + +(cherry picked from commit a347641782dfb47aa45e6e8ffc9e0c6db4c07deb) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/b1146514451d57dff844dbfa2c6767d79acb8b7f +--- + lib/dns/message.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/lib/dns/message.c b/lib/dns/message.c +index 7c813a5cf6..9dafd69f11 100644 +--- a/lib/dns/message.c ++++ b/lib/dns/message.c +@@ -2318,10 +2318,11 @@ dns_message_renderend(dns_message_t *msg) { + dns_message_renderrelease(msg, msg->opt_reserved); + msg->opt_reserved = 0; + /* +- * Set the extended rcode. ++ * Set the extended rcode. Cast msg->rcode to dns_ttl_t ++ * so that we do a unsigned shift. + */ + msg->opt->ttl &= ~DNS_MESSAGE_EDNSRCODE_MASK; +- msg->opt->ttl |= ((msg->rcode << 20) & ++ msg->opt->ttl |= (((dns_ttl_t)(msg->rcode) << 20) & + DNS_MESSAGE_EDNSRCODE_MASK); + /* + * Render. +-- +2.23.0 + diff --git a/backport-0008-Lock-on-msg-SELECT_POKE_CLOSE-as-it-triggers-a-tsan-.patch b/backport-0008-Lock-on-msg-SELECT_POKE_CLOSE-as-it-triggers-a-tsan-.patch new file mode 100644 index 0000000..2f9cde9 --- /dev/null +++ b/backport-0008-Lock-on-msg-SELECT_POKE_CLOSE-as-it-triggers-a-tsan-.patch @@ -0,0 +1,100 @@ +From d9a2cccc974f33a29cf79d2fe2e8d8afab01b676 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Thu, 20 Aug 2020 12:22:51 +1000 +Subject: [PATCH] Lock on msg == SELECT_POKE_CLOSE as it triggers a tsan error + +WARNING: ThreadSanitizer: data race (pid=1941) + Write of size 4 at 0x7ba40000f050 by main thread (mutexes: write M1100, write M75): + #0 socket_create /builds/isc-projects/bind9/1126G:3009:29 (libisc.so.1105+0x60822) + #1 isc__socket_create /builds/isc-projects/bind9/lib/isc/unix/socket.c:3043:10 (libisc.so.1105+0x60583) + #2 isc_socket_create /builds/isc-projects/bind9/lib/isc/unix/./../socket_api.c:105:11 (libisc.so.1105+0x6a4a0) + #3 open_socket /builds/isc-projects/bind9/lib/dns/dispatch.c:1708:12 (libdns.so.1110+0x7491c) + #4 get_udpsocket /builds/isc-projects/bind9/lib/dns/dispatch.c:2904:13 (libdns.so.1110+0x745ec) + #5 dispatch_createudp /builds/isc-projects/bind9/lib/dns/dispatch.c:2994:12 (libdns.so.1110+0x6e159) + #6 dns_dispatch_getudp_dup /builds/isc-projects/bind9/lib/dns/dispatch.c:2823:11 (libdns.so.1110+0x6d8f2) + #7 dns_dispatch_getudp /builds/isc-projects/bind9/lib/dns/dispatch.c:2849:10 (libdns.so.1110+0x6e99e) + #8 make_dispatchset /builds/isc-projects/bind9/lib/dns/tests/dispatch_test.c:81:11 (dispatch_test+0x4ba8fc) + #9 dispatchset_create /builds/isc-projects/bind9/lib/dns/tests/dispatch_test.c:115:11 (dispatch_test+0x4b9f99) + #10 (libcmocka.so.0+0x50d8) + #11 __libc_start_main /build/glibc-vjB4T1/glibc-2.28/csu/../csu/libc-start.c:308:16 (libc.so.6+0x2409a) + + Previous write of size 4 at 0x7ba40000f050 by thread T14: + #0 wakeup_socket /builds/isc-projects/bind9/lib/isc/unix/socket.c:1126:24 (libisc.so.1105+0x70181) + #1 process_ctlfd /builds/isc-projects/bind9/lib/isc/unix/socket.c:4252:3 (libisc.so.1105+0x6c048) + #2 process_fds /builds/isc-projects/bind9/lib/isc/unix/socket.c:4156:10 (libisc.so.1105+0x6bcb4) + #3 watcher /builds/isc-projects/bind9/lib/isc/unix/socket.c:4395:10 (libisc.so.1105+0x64188) + + Location is heap block of size 16385 at 0x7ba40000f000 allocated by main thread: + #0 malloc (dispatch_test+0x42b7c4) + 
#1 internal_memalloc /builds/isc-projects/bind9/lib/isc/mem.c:887:8 (libisc.so.1105+0x37e38) + #2 mem_get /builds/isc-projects/bind9/lib/isc/mem.c:792:8 (libisc.so.1105+0x32c2c) + #3 isc___mem_get /builds/isc-projects/bind9/lib/isc/mem.c:1310:9 (libisc.so.1105+0x325d7) + #4 isc__mem_get /builds/isc-projects/bind9/lib/isc/mem.c:3012:11 (libisc.so.1105+0x34f80) + #5 isc__socketmgr_create2 /builds/isc-projects/bind9/lib/isc/unix/socket.c:4697:21 (libisc.so.1105+0x6374f) + #6 isc__socketmgr_create /builds/isc-projects/bind9/lib/isc/unix/socket.c:4651:10 (libisc.so.1105+0x635f2) + #7 isc_socketmgr_create /builds/isc-projects/bind9/lib/isc/unix/./../socket_api.c:74:11 (libisc.so.1105+0x6a2c7) + #8 create_managers /builds/isc-projects/bind9/lib/dns/tests/dnstest.c:120:2 (dispatch_test+0x4bb28a) + #9 dns_test_begin /builds/isc-projects/bind9/lib/dns/tests/dnstest.c:192:3 (dispatch_test+0x4bb182) + #10 _setup /builds/isc-projects/bind9/lib/dns/tests/dispatch_test.c:53:11 (dispatch_test+0x4b9ff8) + #11 (libcmocka.so.0+0x51e2) + #12 __libc_start_main /build/glibc-vjB4T1/glibc-2.28/csu/../csu/libc-start.c:308:16 (libc.so.6+0x2409a) + + Mutex M1100 (0x7b5000000230) created at: + #0 pthread_mutex_init (dispatch_test+0x42e60d) + #1 isc__mutex_init /builds/isc-projects/bind9/lib/isc/pthreads/mutex.c:287:8 (libisc.so.1105+0x72317) + #2 dns_dispatchmgr_create /builds/isc-projects/bind9/lib/dns/dispatch.c:1778:11 (libdns.so.1110+0x6a055) + #3 make_dispatchset /builds/isc-projects/bind9/lib/dns/tests/dispatch_test.c:75:11 (dispatch_test+0x4ba883) + #4 dispatchset_create /builds/isc-projects/bind9/lib/dns/tests/dispatch_test.c:115:11 (dispatch_test+0x4b9f99) + #5 (libcmocka.so.0+0x50d8) + #6 __libc_start_main /build/glibc-vjB4T1/glibc-2.28/csu/../csu/libc-start.c:308:16 (libc.so.6+0x2409a) + + Mutex M75 (0x7bb800000320) created at: + #0 pthread_mutex_init (dispatch_test+0x42e60d) + #1 isc__mutex_init /builds/isc-projects/bind9/lib/isc/pthreads/mutex.c:287:8 (libisc.so.1105+0x72317) + #2 isc__socketmgr_create2 /builds/isc-projects/bind9/lib/isc/unix/socket.c:4728:12 (libisc.so.1105+0x63914) + #3 isc__socketmgr_create /builds/isc-projects/bind9/lib/isc/unix/socket.c:4651:10 (libisc.so.1105+0x635f2) + #4 isc_socketmgr_create /builds/isc-projects/bind9/lib/isc/unix/./../socket_api.c:74:11 (libisc.so.1105+0x6a2c7) + #5 create_managers /builds/isc-projects/bind9/lib/dns/tests/dnstest.c:120:2 (dispatch_test+0x4bb28a) + #6 dns_test_begin /builds/isc-projects/bind9/lib/dns/tests/dnstest.c:192:3 (dispatch_test+0x4bb182) + #7 _setup /builds/isc-projects/bind9/lib/dns/tests/dispatch_test.c:53:11 (dispatch_test+0x4b9ff8) + #8 (libcmocka.so.0+0x51e2) + #9 __libc_start_main /build/glibc-vjB4T1/glibc-2.28/csu/../csu/libc-start.c:308:16 (libc.so.6+0x2409a) + + Thread T14 'isc-socket' (tid=1969, running) created by main thread at: + #0 pthread_create (dispatch_test+0x42d08b) + #1 isc_thread_create /builds/isc-projects/bind9/lib/isc/pthreads/thread.c:60:8 (libisc.so.1105+0x72488) + #2 isc__socketmgr_create2 /builds/isc-projects/bind9/lib/isc/unix/socket.c:4787:6 (libisc.so.1105+0x63cc6) + #3 isc__socketmgr_create /builds/isc-projects/bind9/lib/isc/unix/socket.c:4651:10 (libisc.so.1105+0x635f2) + #4 isc_socketmgr_create /builds/isc-projects/bind9/lib/isc/unix/./../socket_api.c:74:11 (libisc.so.1105+0x6a2c7) + #5 create_managers /builds/isc-projects/bind9/lib/dns/tests/dnstest.c:120:2 (dispatch_test+0x4bb28a) + #6 dns_test_begin /builds/isc-projects/bind9/lib/dns/tests/dnstest.c:192:3 (dispatch_test+0x4bb182) + #7 _setup 
/builds/isc-projects/bind9/lib/dns/tests/dispatch_test.c:53:11 (dispatch_test+0x4b9ff8) + #8 (libcmocka.so.0+0x51e2) + #9 __libc_start_main /build/glibc-vjB4T1/glibc-2.28/csu/../csu/libc-start.c:308:16 (libc.so.6+0x2409a) + +SUMMARY: ThreadSanitizer: data race /builds/isc-projects/bind9/lib/isc/unix/socket.c:3009:29 in socket_create +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/d9a2cccc974f33a29cf79d2fe2e8d8afab01b676 +--- + lib/isc/unix/socket.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/lib/isc/unix/socket.c b/lib/isc/unix/socket.c +index d250a6f1fd..219f4e3f92 100644 +--- a/lib/isc/unix/socket.c ++++ b/lib/isc/unix/socket.c +@@ -1121,9 +1121,10 @@ wakeup_socket(isc__socketmgr_t *manager, int fd, int msg) { + INSIST(fd >= 0 && fd < (int)manager->maxsocks); + + if (msg == SELECT_POKE_CLOSE) { +- /* No one should be updating fdstate, so no need to lock it */ ++ LOCK(&manager->fdlock[lockid]); + INSIST(manager->fdstate[fd] == CLOSE_PENDING); + manager->fdstate[fd] = CLOSED; ++ UNLOCK(&manager->fdlock[lockid]); + (void)unwatch_fd(manager, fd, SELECT_POKE_READ); + (void)unwatch_fd(manager, fd, SELECT_POKE_WRITE); + (void)close(fd); +-- +2.23.0 + diff --git a/backport-0009-Lock-access-when-updating-reading-manager-epoll_even.patch b/backport-0009-Lock-access-when-updating-reading-manager-epoll_even.patch new file mode 100644 index 0000000..2e15035 --- /dev/null +++ b/backport-0009-Lock-access-when-updating-reading-manager-epoll_even.patch @@ -0,0 +1,124 @@ +From 9b663419711ee2beb92c1c34a79669b251e7562d Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Thu, 20 Aug 2020 12:46:24 +1000 +Subject: [PATCH] Lock access when updating/reading manager->epoll_events[fd] + +WARNING: ThreadSanitizer: data race (pid=110) + Write of size 4 at 0x7ba400014050 by main thread (mutexes: write M1100, write M75): + #0 socket_create /builds/isc-projects/bind9/lib/isc/unix/socket.c:3012:34 (libisc.so.1105+0x6085c) + #1 isc__socket_create /builds/isc-projects/bind9/lib/isc/unix/socket.c:3044:10 (libisc.so.1105+0x60583) + #2 isc_socket_create /builds/isc-projects/bind9/lib/isc/unix/./../socket_api.c:105:11 (libisc.so.1105+0x6a4a0) + #3 open_socket /builds/isc-projects/bind9/lib/dns/dispatch.c:1708:12 (libdns.so.1110+0x7491c) + #4 get_udpsocket /builds/isc-projects/bind9/lib/dns/dispatch.c:2904:13 (libdns.so.1110+0x745ec) + #5 dispatch_createudp /builds/isc-projects/bind9/lib/dns/dispatch.c:2994:12 (libdns.so.1110+0x6e159) + #6 dns_dispatch_getudp_dup /builds/isc-projects/bind9/lib/dns/dispatch.c:2823:11 (libdns.so.1110+0x6d8f2) + #7 dns_dispatch_getudp /builds/isc-projects/bind9/lib/dns/dispatch.c:2849:10 (libdns.so.1110+0x6e99e) + #8 make_dispatchset /builds/isc-projects/bind9/lib/dns/tests/dispatch_test.c:81:11 (dispatch_test+0x4ba8fc) + #9 dispatchset_create /builds/isc-projects/bind9/lib/dns/tests/dispatch_test.c:115:11 (dispatch_test+0x4b9f99) + #10 (libcmocka.so.0+0x50d8) + #11 __libc_start_main /build/glibc-vjB4T1/glibc-2.28/csu/../csu/libc-start.c:308:16 (libc.so.6+0x2409a) + + Previous write of size 4 at 0x7ba400014050 by thread T14: + #0 unwatch_fd /builds/isc-projects/bind9/lib/isc/unix/socket.c (libisc.so.1105+0x6b913) + #1 wakeup_socket /builds/isc-projects/bind9/lib/isc/unix/socket.c:1128:9 (libisc.so.1105+0x701fc) + #2 process_ctlfd /builds/isc-projects/bind9/lib/isc/unix/socket.c:4253:3 (libisc.so.1105+0x6c048) + #3 process_fds /builds/isc-projects/bind9/lib/isc/unix/socket.c:4157:10 (libisc.so.1105+0x6bcb4) + #4 watcher 
/builds/isc-projects/bind9/lib/isc/unix/socket.c:4396:10 (libisc.so.1105+0x64188) + + Location is heap block of size 16385 at 0x7ba400014000 allocated by main thread: + #0 malloc (dispatch_test+0x42b7c4) + #1 internal_memalloc /builds/isc-projects/bind9/lib/isc/mem.c:887:8 (libisc.so.1105+0x37e38) + #2 mem_get /builds/isc-projects/bind9/lib/isc/mem.c:792:8 (libisc.so.1105+0x32c2c) + #3 isc___mem_get /builds/isc-projects/bind9/lib/isc/mem.c:1310:9 (libisc.so.1105+0x325d7) + #4 isc__mem_get /builds/isc-projects/bind9/lib/isc/mem.c:3012:11 (libisc.so.1105+0x34f80) + #5 isc__socketmgr_create2 /builds/isc-projects/bind9/lib/isc/unix/socket.c:4704:26 (libisc.so.1105+0x6379a) + #6 isc__socketmgr_create /builds/isc-projects/bind9/lib/isc/unix/socket.c:4652:10 (libisc.so.1105+0x635f2) + #7 isc_socketmgr_create /builds/isc-projects/bind9/lib/isc/unix/./../socket_api.c:74:11 (libisc.so.1105+0x6a2c7) + #8 create_managers /builds/isc-projects/bind9/lib/dns/tests/dnstest.c:120:2 (dispatch_test+0x4bb28a) + #9 dns_test_begin /builds/isc-projects/bind9/lib/dns/tests/dnstest.c:192:3 (dispatch_test+0x4bb182) + #10 _setup /builds/isc-projects/bind9/lib/dns/tests/dispatch_test.c:53:11 (dispatch_test+0x4b9ff8) + #11 (libcmocka.so.0+0x51e2) + #12 __libc_start_main /build/glibc-vjB4T1/glibc-2.28/csu/../csu/libc-start.c:308:16 (libc.so.6+0x2409a) + + Mutex M1100 (0x7b5000000230) created at: + #0 pthread_mutex_init (dispatch_test+0x42e60d) + #1 isc__mutex_init /builds/isc-projects/bind9/lib/isc/pthreads/mutex.c:287:8 (libisc.so.1105+0x72377) + #2 dns_dispatchmgr_create /builds/isc-projects/bind9/lib/dns/dispatch.c:1778:11 (libdns.so.1110+0x6a055) + #3 make_dispatchset /builds/isc-projects/bind9/lib/dns/tests/dispatch_test.c:75:11 (dispatch_test+0x4ba883) + #4 dispatchset_create /builds/isc-projects/bind9/lib/dns/tests/dispatch_test.c:115:11 (dispatch_test+0x4b9f99) + #5 (libcmocka.so.0+0x50d8) + #6 __libc_start_main /build/glibc-vjB4T1/glibc-2.28/csu/../csu/libc-start.c:308:16 (libc.so.6+0x2409a) + + Mutex M75 (0x7bb800000320) created at: + #0 pthread_mutex_init (dispatch_test+0x42e60d) + #1 isc__mutex_init /builds/isc-projects/bind9/lib/isc/pthreads/mutex.c:287:8 (libisc.so.1105+0x72377) + #2 isc__socketmgr_create2 /builds/isc-projects/bind9/lib/isc/unix/socket.c:4729:12 (libisc.so.1105+0x63914) + #3 isc__socketmgr_create /builds/isc-projects/bind9/lib/isc/unix/socket.c:4652:10 (libisc.so.1105+0x635f2) + #4 isc_socketmgr_create /builds/isc-projects/bind9/lib/isc/unix/./../socket_api.c:74:11 (libisc.so.1105+0x6a2c7) + #5 create_managers /builds/isc-projects/bind9/lib/dns/tests/dnstest.c:120:2 (dispatch_test+0x4bb28a) + #6 dns_test_begin /builds/isc-projects/bind9/lib/dns/tests/dnstest.c:192:3 (dispatch_test+0x4bb182) + #7 _setup /builds/isc-projects/bind9/lib/dns/tests/dispatch_test.c:53:11 (dispatch_test+0x4b9ff8) + #8 (libcmocka.so.0+0x51e2) + #9 __libc_start_main /build/glibc-vjB4T1/glibc-2.28/csu/../csu/libc-start.c:308:16 (libc.so.6+0x2409a) + + Thread T14 'isc-socket' (tid=150, running) created by main thread at: + #0 pthread_create (dispatch_test+0x42d08b) + #1 isc_thread_create /builds/isc-projects/bind9/lib/isc/pthreads/thread.c:60:8 (libisc.so.1105+0x724e8) + #2 isc__socketmgr_create2 /builds/isc-projects/bind9/lib/isc/unix/socket.c:4788:6 (libisc.so.1105+0x63cc6) + #3 isc__socketmgr_create /builds/isc-projects/bind9/lib/isc/unix/socket.c:4652:10 (libisc.so.1105+0x635f2) + #4 isc_socketmgr_create /builds/isc-projects/bind9/lib/isc/unix/./../socket_api.c:74:11 (libisc.so.1105+0x6a2c7) + #5 create_managers 
/builds/isc-projects/bind9/lib/dns/tests/dnstest.c:120:2 (dispatch_test+0x4bb28a) + #6 dns_test_begin /builds/isc-projects/bind9/lib/dns/tests/dnstest.c:192:3 (dispatch_test+0x4bb182) + #7 _setup /builds/isc-projects/bind9/lib/dns/tests/dispatch_test.c:53:11 (dispatch_test+0x4b9ff8) + #8 (libcmocka.so.0+0x51e2) + #9 __libc_start_main /build/glibc-vjB4T1/glibc-2.28/csu/../csu/libc-start.c:308:16 (libc.so.6+0x2409a) + +SUMMARY: ThreadSanitizer: data race /builds/isc-projects/bind9/lib/isc/unix/socket.c:3012:34 in socket_create +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/9b663419711ee2beb92c1c34a79669b251e7562d +--- + lib/isc/unix/socket.c | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/lib/isc/unix/socket.c b/lib/isc/unix/socket.c +index 219f4e3f92..2b034d1762 100644 +--- a/lib/isc/unix/socket.c ++++ b/lib/isc/unix/socket.c +@@ -957,14 +957,17 @@ watch_fd(isc__socketmgr_t *manager, int fd, int msg) { + uint32_t oldevents; + int ret; + int op; ++ int lockid = FDLOCK_ID(fd); + + oldevents = manager->epoll_events[fd]; ++ LOCK(&manager->fdlock[lockid]); + if (msg == SELECT_POKE_READ) + manager->epoll_events[fd] |= EPOLLIN; + else + manager->epoll_events[fd] |= EPOLLOUT; + + event.events = manager->epoll_events[fd]; ++ UNLOCK(&manager->fdlock[lockid]); + memset(&event.data, 0, sizeof(event.data)); + event.data.fd = fd; + +@@ -1036,13 +1039,15 @@ unwatch_fd(isc__socketmgr_t *manager, int fd, int msg) { + struct epoll_event event; + int ret; + int op; ++ int lockid = FDLOCK_ID(fd); + ++ LOCK(&manager->fdlock[lockid]); + if (msg == SELECT_POKE_READ) + manager->epoll_events[fd] &= ~(EPOLLIN); + else + manager->epoll_events[fd] &= ~(EPOLLOUT); +- + event.events = manager->epoll_events[fd]; ++ UNLOCK(&manager->fdlock[lockid]); + memset(&event.data, 0, sizeof(event.data)); + event.data.fd = fd; + +-- +2.23.0 + diff --git a/backport-0010-Take-complete-ownership-of-aclp-before-calling-destr.patch b/backport-0010-Take-complete-ownership-of-aclp-before-calling-destr.patch new file mode 100644 index 0000000..8bdcc7d --- /dev/null +++ b/backport-0010-Take-complete-ownership-of-aclp-before-calling-destr.patch @@ -0,0 +1,31 @@ +From 8b80f409934078507f7f566ea4c1f281c0354a27 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Sat, 22 Aug 2020 14:39:17 +1000 +Subject: [PATCH] Take complete ownership of *aclp before calling destroy. + +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/8b80f409934078507f7f566ea4c1f281c0354a27 +--- + lib/dns/acl.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/lib/dns/acl.c b/lib/dns/acl.c +index 3fc8ab78a0..3d7c85d92f 100644 +--- a/lib/dns/acl.c ++++ b/lib/dns/acl.c +@@ -565,10 +565,11 @@ dns_acl_detach(dns_acl_t **aclp) { + + REQUIRE(DNS_ACL_VALID(acl)); + ++ *aclp = NULL; ++ + isc_refcount_decrement(&acl->refcount, &refs); + if (refs == 0) + destroy(acl); +- *aclp = NULL; + } + + +-- +2.23.0 + diff --git a/backport-0011-Take-complete-ownership-of-validatorp-before-calling.patch b/backport-0011-Take-complete-ownership-of-validatorp-before-calling.patch new file mode 100644 index 0000000..a25a071 --- /dev/null +++ b/backport-0011-Take-complete-ownership-of-validatorp-before-calling.patch @@ -0,0 +1,36 @@ +From d776a73362158d7414a6864b882461aae8f9d477 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Mon, 24 Aug 2020 13:34:54 +1000 +Subject: [PATCH] Take complete ownership of *validatorp before calling + destroy. 
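This patch and the aclp change above apply the same ownership pattern: copy the caller's pointer, clear it, and only then drop the reference, so the caller never retains a pointer to an object that may already be mid-destruction. A minimal standalone sketch of the pattern with a toy reference-counted type (not BIND code; C11 atomics stand in for isc_refcount):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct object {
	atomic_uint refs;
} object_t;

static void
object_destroy(object_t *obj) {
	free(obj);
}

static void
object_detach(object_t **objp) {
	object_t *obj = *objp;

	*objp = NULL;	/* take complete ownership before the refcount drops */

	/* atomic_fetch_sub() returns the old value: 1 means last reference. */
	if (atomic_fetch_sub(&obj->refs, 1) == 1) {
		object_destroy(obj);
	}
}

int
main(void) {
	object_t *obj = malloc(sizeof(*obj));
	object_t *ref = obj;

	atomic_init(&obj->refs, 1);
	object_detach(&ref);
	printf("ref is %s\n", ref == NULL ? "NULL" : "non-NULL");
	return (0);
}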
+ +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/d776a73362158d7414a6864b882461aae8f9d477 +--- + lib/dns/validator.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/lib/dns/validator.c b/lib/dns/validator.c +index ef5fd255e7..c8ee09172a 100644 +--- a/lib/dns/validator.c ++++ b/lib/dns/validator.c +@@ -3907,6 +3907,7 @@ dns_validator_destroy(dns_validator_t **validatorp) { + + REQUIRE(validatorp != NULL); + val = *validatorp; ++ *validatorp = NULL; + REQUIRE(VALID_VALIDATOR(val)); + + LOCK(&val->lock); +@@ -3920,8 +3921,6 @@ dns_validator_destroy(dns_validator_t **validatorp) { + + if (want_destroy) + destroy(val); +- +- *validatorp = NULL; + } + + static void +-- +2.23.0 + diff --git a/backport-0012-Address-lock-order-inversion.patch b/backport-0012-Address-lock-order-inversion.patch new file mode 100644 index 0000000..864dc52 --- /dev/null +++ b/backport-0012-Address-lock-order-inversion.patch @@ -0,0 +1,153 @@ +From 3ed7746ec92e69a9c37b4b34c6f0fce06b7ba592 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Mon, 24 Aug 2020 11:44:09 +1000 +Subject: [PATCH] Address lock-order-inversion + +Obtain references to view->redirect and view->managed_keys then +release view->lock so dns_zone_setviewcommit and dns_zone_setviewrevert +can obtain the view->lock while holding zone->lock. + +WARNING: ThreadSanitizer: lock-order-inversion (potential deadlock) (pid=9132) + Cycle in lock order graph: M987831431424375936 (0x000000000000) => M1012319771577875480 (0x000000000000) => M987831431424375936 + + Mutex M1012319771577875480 acquired here while holding mutex M987831431424375936 in thread T2: + #0 pthread_mutex_lock (named+0x4642a6) + #1 dns_zone_setviewcommit /builds/isc-projects/bind9/lib/dns/zone.c:1571:2 (libdns.so.1110+0x1d74eb) + #2 dns_view_setviewcommit /builds/isc-projects/bind9/lib/dns/view.c:2388:3 (libdns.so.1110+0x1cfe29) + #3 load_configuration /builds/isc-projects/bind9/bin/named/./server.c:8188:3 (named+0x51eadd) + #4 loadconfig /builds/isc-projects/bind9/bin/named/./server.c:9438:11 (named+0x510c66) + #5 ns_server_reconfigcommand /builds/isc-projects/bind9/bin/named/./server.c:9773:2 (named+0x510b41) + #6 ns_control_docommand /builds/isc-projects/bind9/bin/named/control.c:243:12 (named+0x4e451a) + #7 control_recvmessage /builds/isc-projects/bind9/bin/named/controlconf.c:465:13 (named+0x4e9056) + #8 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) + #9 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) + + Mutex M987831431424375936 previously acquired by the same thread here: + #0 pthread_mutex_lock (named+0x4642a6) + #1 dns_view_setviewcommit /builds/isc-projects/bind9/lib/dns/view.c:2382:2 (libdns.so.1110+0x1cfde7) + #2 load_configuration /builds/isc-projects/bind9/bin/named/./server.c:8188:3 (named+0x51eadd) + #3 loadconfig /builds/isc-projects/bind9/bin/named/./server.c:9438:11 (named+0x510c66) + #4 ns_server_reconfigcommand /builds/isc-projects/bind9/bin/named/./server.c:9773:2 (named+0x510b41) + #5 ns_control_docommand /builds/isc-projects/bind9/bin/named/control.c:243:12 (named+0x4e451a) + #6 control_recvmessage /builds/isc-projects/bind9/bin/named/controlconf.c:465:13 (named+0x4e9056) + #7 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) + #8 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) + + Mutex M987831431424375936 acquired here while holding mutex M1012319771577875480 in thread T7: + #0 
pthread_mutex_lock (named+0x4642a6) + #1 dns_view_findzonecut2 /builds/isc-projects/bind9/lib/dns/view.c:1300:2 (libdns.so.1110+0x1cc93a) + #2 dns_view_findzonecut /builds/isc-projects/bind9/lib/dns/view.c:1261:9 (libdns.so.1110+0x1cc864) + #3 fctx_create /builds/isc-projects/bind9/lib/dns/resolver.c:4459:13 (libdns.so.1110+0x1779d3) + #4 dns_resolver_createfetch3 /builds/isc-projects/bind9/lib/dns/resolver.c:9628:12 (libdns.so.1110+0x176cb6) + #5 dns_resolver_createfetch /builds/isc-projects/bind9/lib/dns/resolver.c:9504:10 (libdns.so.1110+0x174e17) + #6 zone_refreshkeys /builds/isc-projects/bind9/lib/dns/zone.c:10061:12 (libdns.so.1110+0x2055a5) + #7 zone_maintenance /builds/isc-projects/bind9/lib/dns/zone.c:10274:5 (libdns.so.1110+0x203a78) + #8 zone_timer /builds/isc-projects/bind9/lib/dns/zone.c:13106:2 (libdns.so.1110+0x1e815a) + #9 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) + #10 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) + + Mutex M1012319771577875480 previously acquired by the same thread here: + #0 pthread_mutex_lock (named+0x4642a6) + #1 zone_refreshkeys /builds/isc-projects/bind9/lib/dns/zone.c:9951:2 (libdns.so.1110+0x204dc3) + #2 zone_maintenance /builds/isc-projects/bind9/lib/dns/zone.c:10274:5 (libdns.so.1110+0x203a78) + #3 zone_timer /builds/isc-projects/bind9/lib/dns/zone.c:13106:2 (libdns.so.1110+0x1e815a) + #4 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) + #5 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) + + Thread T2 'isc-worker0001' (tid=9163, running) created by main thread at: + #0 pthread_create (named+0x446edb) + #1 isc_thread_create /builds/isc-projects/bind9/lib/isc/pthreads/thread.c:60:8 (libisc.so.1107+0x726d8) + #2 isc__taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:1468:7 (libisc.so.1107+0x4d635) + #3 isc_taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:2109:11 (libisc.so.1107+0x4f587) + #4 create_managers /builds/isc-projects/bind9/bin/named/./main.c:886:11 (named+0x4f1a97) + #5 setup /builds/isc-projects/bind9/bin/named/./main.c:1305:11 (named+0x4f05ee) + #6 main /builds/isc-projects/bind9/bin/named/./main.c:1556:2 (named+0x4ef12d) + + Thread T7 'isc-worker0006' (tid=9168, running) created by main thread at: + #0 pthread_create (named+0x446edb) + #1 isc_thread_create /builds/isc-projects/bind9/lib/isc/pthreads/thread.c:60:8 (libisc.so.1107+0x726d8) + #2 isc__taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:1468:7 (libisc.so.1107+0x4d635) + #3 isc_taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:2109:11 (libisc.so.1107+0x4f587) + #4 create_managers /builds/isc-projects/bind9/bin/named/./main.c:886:11 (named+0x4f1a97) + #5 setup /builds/isc-projects/bind9/bin/named/./main.c:1305:11 (named+0x4f05ee) + #6 main /builds/isc-projects/bind9/bin/named/./main.c:1556:2 (named+0x4ef12d) + +SUMMARY: ThreadSanitizer: lock-order-inversion (potential deadlock) (/builds/isc-projects/bind9/bin/named/.libs/named+0x4642a6) in pthread_mutex_lock +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/3ed7746ec92e69a9c37b4b34c6f0fce06b7ba592 +--- + lib/dns/view.c | 28 ++++++++++++++++++++++++---- + 1 file changed, 24 insertions(+), 4 deletions(-) + +diff --git a/lib/dns/view.c b/lib/dns/view.c +index 006f99794d..0a5559ac9d 100644 +--- a/lib/dns/view.c ++++ b/lib/dns/view.c +@@ -2377,25 +2377,37 @@ dns_view_loadnta(dns_view_t *view) { + + void + dns_view_setviewcommit(dns_view_t 
*view) { ++ dns_zone_t *redirect = NULL, *managed_keys = NULL; ++ + REQUIRE(DNS_VIEW_VALID(view)); + + LOCK(&view->lock); + + if (view->redirect != NULL) { +- dns_zone_setviewcommit(view->redirect); ++ dns_zone_attach(view->redirect, &redirect); + } + if (view->managed_keys != NULL) { +- dns_zone_setviewcommit(view->managed_keys); ++ dns_zone_attach(view->managed_keys, &managed_keys); + } + if (view->zonetable != NULL) { + dns_zt_setviewcommit(view->zonetable); + } + + UNLOCK(&view->lock); ++ ++ if (redirect != NULL) { ++ dns_zone_setviewcommit(redirect); ++ dns_zone_detach(&redirect); ++ } ++ if (managed_keys != NULL) { ++ dns_zone_setviewcommit(managed_keys); ++ dns_zone_detach(&managed_keys); ++ } + } + + void + dns_view_setviewrevert(dns_view_t *view) { ++ dns_zone_t *redirect = NULL, *managed_keys = NULL; + dns_zt_t *zonetable; + + REQUIRE(DNS_VIEW_VALID(view)); +@@ -2406,14 +2418,22 @@ dns_view_setviewrevert(dns_view_t *view) { + */ + LOCK(&view->lock); + if (view->redirect != NULL) { +- dns_zone_setviewrevert(view->redirect); ++ dns_zone_attach(view->redirect, &redirect); + } + if (view->managed_keys != NULL) { +- dns_zone_setviewrevert(view->managed_keys); ++ dns_zone_attach(view->managed_keys, &managed_keys); + } + zonetable = view->zonetable; + UNLOCK(&view->lock); + ++ if (redirect != NULL) { ++ dns_zone_setviewrevert(redirect); ++ dns_zone_detach(&redirect); ++ } ++ if (managed_keys != NULL) { ++ dns_zone_setviewrevert(managed_keys); ++ dns_zone_detach(&managed_keys); ++ } + if (zonetable != NULL) { + dns_zt_setviewrevert(zonetable); + } +-- +2.23.0 + diff --git a/backport-0013-It-appears-that-you-can-t-change-what-you-are-pollin.patch b/backport-0013-It-appears-that-you-can-t-change-what-you-are-pollin.patch new file mode 100644 index 0000000..0d3fed0 --- /dev/null +++ b/backport-0013-It-appears-that-you-can-t-change-what-you-are-pollin.patch @@ -0,0 +1,69 @@ +From cf4a9d9ab87d572404e39177ee240c7618831982 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Wed, 26 Aug 2020 17:52:55 +1000 +Subject: [PATCH] It appears that you can't change what you are polling for + while connecting. 
+ +WARNING: ThreadSanitizer: data race (pid=6465) + Read of size 8 at 0x7ba000002040 by thread T14: + #0 epoll_ctl (named+0x44ccd2) + #1 watch_fd /builds/isc-projects/bind9/lib/isc/unix/socket.c:975:8 (libisc.so.1107+0x6bd90) + #2 wakeup_socket /builds/isc-projects/bind9/lib/isc/unix/socket.c:1164:11 (libisc.so.1107+0x7057d) + #3 process_ctlfd /builds/isc-projects/bind9/lib/isc/unix/socket.c:4258:3 (libisc.so.1107+0x6c308) + #4 process_fds /builds/isc-projects/bind9/lib/isc/unix/socket.c:4162:10 (libisc.so.1107+0x6bf74) + #5 watcher /builds/isc-projects/bind9/lib/isc/unix/socket.c:4401:10 (libisc.so.1107+0x64348) + + Previous write of size 8 at 0x7ba000002040 by thread T9 (mutexes: write M81481868977181736): + #0 connect (named+0x44b7e0) + #1 isc__socket_connect /builds/isc-projects/bind9/lib/isc/unix/socket.c:5902:7 (libisc.so.1107+0x67a79) + #2 isc_socket_connect /builds/isc-projects/bind9/lib/isc/unix/./../socket_api.c:169:11 (libisc.so.1107+0x6aa4b) + #3 resquery_send /builds/isc-projects/bind9/lib/dns/resolver.c:2573:13 (libdns.so.1110+0x18570b) + #4 fctx_query /builds/isc-projects/bind9/lib/dns/resolver.c:1903:12 (libdns.so.1110+0x1815a3) + #5 fctx_try /builds/isc-projects/bind9/lib/dns/resolver.c:3863:11 (libdns.so.1110+0x17e3a9) + #6 fctx_start /builds/isc-projects/bind9/lib/dns/resolver.c:4219:4 (libdns.so.1110+0x178833) + #7 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507f5) + #8 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d749) + + Location is file descriptor 516 created by thread T9 at: + #0 connect (named+0x44b7e0) + #1 isc__socket_connect /builds/isc-projects/bind9/lib/isc/unix/socket.c:5902:7 (libisc.so.1107+0x67a79) + #2 isc_socket_connect /builds/isc-projects/bind9/lib/isc/unix/./../socket_api.c:169:11 (libisc.so.1107+0x6aa4b) + #3 resquery_send /builds/isc-projects/bind9/lib/dns/resolver.c:2573:13 (libdns.so.1110+0x18570b) + #4 fctx_query /builds/isc-projects/bind9/lib/dns/resolver.c:1903:12 (libdns.so.1110+0x1815a3) + #5 fctx_try /builds/isc-projects/bind9/lib/dns/resolver.c:3863:11 (libdns.so.1110+0x17e3a9) + #6 fctx_start /builds/isc-projects/bind9/lib/dns/resolver.c:4219:4 (libdns.so.1110+0x178833) + #7 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507f5) + #8 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d749) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/cf4a9d9ab87d572404e39177ee240c7618831982 +--- + lib/isc/unix/socket.c | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/lib/isc/unix/socket.c b/lib/isc/unix/socket.c +index 0ebdf4c345..692f3d83f9 100644 +--- a/lib/isc/unix/socket.c ++++ b/lib/isc/unix/socket.c +@@ -967,12 +967,18 @@ watch_fd(isc__socketmgr_t *manager, int fd, int msg) { + manager->epoll_events[fd] |= EPOLLOUT; + + event.events = manager->epoll_events[fd]; +- UNLOCK(&manager->fdlock[lockid]); + memset(&event.data, 0, sizeof(event.data)); + event.data.fd = fd; + + op = (oldevents == 0U) ? 
EPOLL_CTL_ADD : EPOLL_CTL_MOD; ++ if (manager->fds[fd] != NULL) { ++ LOCK(&manager->fds[fd]->lock); ++ } + ret = epoll_ctl(manager->epoll_fd, op, fd, &event); ++ if (manager->fds[fd] != NULL) { ++ UNLOCK(&manager->fds[fd]->lock); ++ } ++ UNLOCK(&manager->fdlock[lockid]); + if (ret == -1) { + if (errno == EEXIST) + UNEXPECTED_ERROR(__FILE__, __LINE__, +-- +2.23.0 + diff --git a/backport-0014-counter-used-was-read-without-the-lock-being-held.patch b/backport-0014-counter-used-was-read-without-the-lock-being-held.patch new file mode 100644 index 0000000..0478935 --- /dev/null +++ b/backport-0014-counter-used-was-read-without-the-lock-being-held.patch @@ -0,0 +1,49 @@ +From 15ae4585d278d8a137d038636f39acc62ab67743 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Thu, 27 Aug 2020 11:18:50 +1000 +Subject: [PATCH] `counter->used` was read without the lock being held. + +WARNING: ThreadSanitizer: data race (pid=11785) + Write of size 4 at 0x7b180001ba10 by thread T12 (mutexes: write M835834548863482336): + #0 isc_counter_increment /builds/isc-projects/bind9/lib/isc/counter.c:70:15 (libisc.so.1107+0x1dcb6) + #1 fctx_try /builds/isc-projects/bind9/lib/dns/resolver.c:3851:11 (libdns.so.1110+0x17e312) + #2 resume_dslookup /builds/isc-projects/bind9/lib/dns/resolver.c:7505:3 (libdns.so.1110+0x18ccf0) + #3 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507f5) + #4 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d749) + + Previous read of size 4 at 0x7b180001ba10 by thread T7: + #0 isc_counter_used /builds/isc-projects/bind9/lib/isc/counter.c:82:19 (libisc.so.1107+0x1dd5f) + #1 fctx_try /builds/isc-projects/bind9/lib/dns/resolver.c:3798:6 (libdns.so.1110+0x17e0d1) + #2 fctx_start /builds/isc-projects/bind9/lib/dns/resolver.c:4219:4 (libdns.so.1110+0x178833) + #3 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507f5) + #4 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d749) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/15ae4585d278d8a137d038636f39acc62ab67743 +--- + lib/isc/counter.c | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/lib/isc/counter.c b/lib/isc/counter.c +index 8c70051837..e7cba062f2 100644 +--- a/lib/isc/counter.c ++++ b/lib/isc/counter.c +@@ -77,9 +77,15 @@ isc_counter_increment(isc_counter_t *counter) { + + unsigned int + isc_counter_used(isc_counter_t *counter) { ++ unsigned int used; ++ + REQUIRE(VALID_COUNTER(counter)); + +- return (counter->used); ++ LOCK(&counter->lock); ++ used = counter->used; ++ UNLOCK(&counter->lock); ++ ++ return (used); + } + + void +-- +2.23.0 + diff --git a/backport-0015-Missing-locks-in-ns_lwresd_shutdown.patch b/backport-0015-Missing-locks-in-ns_lwresd_shutdown.patch new file mode 100644 index 0000000..c54b283 --- /dev/null +++ b/backport-0015-Missing-locks-in-ns_lwresd_shutdown.patch @@ -0,0 +1,46 @@ +From 818520216d85c7e86f05de76449782fabaa31fa1 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Thu, 27 Aug 2020 14:41:30 +1000 +Subject: [PATCH] Missing locks in ns_lwresd_shutdown. 
+ +WARNING: ThreadSanitizer: data race + Read of size 8 at 0x000000000001 by main thread: + #0 ns_lwresd_shutdown bin/named/lwresd.c:885:3 + #1 destroy_managers bin/named/./main.c:938:2 + #2 cleanup bin/named/./main.c:1346:2 + #3 main bin/named/./main.c:1594:2 + + Previous write of size 8 at 0x000000000001 by thread T1 (mutexes: write M1): + #0 configure_listener bin/named/lwresd.c:768:2 + #1 ns_lwresd_configure bin/named/lwresd.c:836:5 + #2 load_configuration bin/named/./server.c:8230:2 + #3 run_server bin/named/./server.c + #4 dispatch lib/isc/task.c:1157:7 + #5 run lib/isc/task.c:1331:2 +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/818520216d85c7e86f05de76449782fabaa31fa1 +--- + bin/named/lwresd.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/bin/named/lwresd.c b/bin/named/lwresd.c +index 1f834cb38f..499314110e 100644 +--- a/bin/named/lwresd.c ++++ b/bin/named/lwresd.c +@@ -880,9 +880,13 @@ ns_lwresd_shutdown(void) { + + RUNTIME_CHECK(isc_once_do(&once, initialize_mutex) == ISC_R_SUCCESS); + ++ LOCK(&listeners_lock); + while (!ISC_LIST_EMPTY(listeners)) { + listener = ISC_LIST_HEAD(listeners); + ISC_LIST_UNLINK(listeners, listener, link); ++ UNLOCK(&listeners_lock); + ns_lwreslistener_detach(&listener); ++ LOCK(&listeners_lock); + } ++ UNLOCK(&listeners_lock); + } +-- +2.23.0 + diff --git a/backport-0016-Use-atomics-to-update-counters.patch b/backport-0016-Use-atomics-to-update-counters.patch new file mode 100644 index 0000000..36052a5 --- /dev/null +++ b/backport-0016-Use-atomics-to-update-counters.patch @@ -0,0 +1,107 @@ +From 063a881a6c6c6bd542b0c24e1fb2882f5b7b264e Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Thu, 27 Aug 2020 15:12:10 +1000 +Subject: [PATCH] Use atomics to update counters. + +WARNING: ThreadSanitizer: data race + Write of size 4 at 0x000000000001 by thread T1: + #0 dns_acache_countquerymiss lib/dns/acache.c:1184:22 + #1 rdataset_getadditional lib/dns/rbtdb.c:9875:3 + #2 dns_rdataset_getadditional lib/dns/rdataset.c:711:11 + #3 query_addadditional2 bin/named/query.c:1967:11 + #4 additionaldata_ns lib/dns/./rdata/generic/ns_2.c:198:10 + #5 dns_rdata_additionaldata lib/dns/rdata.c:1246:2 + #6 dns_rdataset_additionaldata lib/dns/rdataset.c:629:12 + #7 query_addrdataset bin/named/query.c:2411:8 + #8 query_addrrset bin/named/query.c:2802:2 + #9 query_find bin/named/query.c:9135:4 + #10 query_resume bin/named/query.c:4164:12 + #11 dispatch lib/isc/task.c:1157:7 + #12 run lib/isc/task.c:1331:2 + + Previous write of size 4 at 0x000000000001 by thread T2: + #0 dns_acache_countquerymiss lib/dns/acache.c:1184:22 + #1 rdataset_getadditional lib/dns/rbtdb.c:9875:3 + #2 dns_rdataset_getadditional lib/dns/rdataset.c:711:11 + #3 query_addadditional2 bin/named/query.c:1967:11 + #4 additionaldata_ns lib/dns/./rdata/generic/ns_2.c:198:10 + #5 dns_rdata_additionaldata lib/dns/rdata.c:1246:2 + #6 dns_rdataset_additionaldata lib/dns/rdataset.c:629:12 + #7 query_addrdataset bin/named/query.c:2411:8 + #8 query_addrrset bin/named/query.c:2802:2 + #9 query_find bin/named/query.c:9135:4 + #10 query_resume bin/named/query.c:4164:12 + #11 dispatch lib/isc/task.c:1157:7 + #12 run lib/isc/task.c:1331:2 +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/063a881a6c6c6bd542b0c24e1fb2882f5b7b264e +--- + lib/dns/acache.c | 28 ++++++++++++++++++++++------ + 1 file changed, 22 insertions(+), 6 deletions(-) + +diff --git a/lib/dns/acache.c b/lib/dns/acache.c +index fc6973f4cc..14ba672135 100644 +--- a/lib/dns/acache.c ++++ 
b/lib/dns/acache.c +@@ -174,9 +174,24 @@ struct acache_cleaner { + }; + + struct dns_acachestats { ++#ifdef ACACHE_HAVESTDATOMIC ++ _Atomic(unsigned int) hits; ++ _Atomic(unsigned int) queries; ++ _Atomic(unsigned int) misses; ++#define ACACHE_INC(x) atomic_fetch_add(&(x), 1) ++#define ACACHE_LOAD(x) atomic_load(&(x)) ++#else + unsigned int hits; + unsigned int queries; + unsigned int misses; ++#if defined(ISC_PLATFORM_HAVEXADD) ++#define ACACHE_INC(x) isc_atomic_xadd((int32_t*)&(x), 1) ++#define ACACHE_LOAD(x) isc_atomic_xadd((int32_t*)&(x), 0) ++#else ++#define ACACHE_INC(x) ((x)++) ++#define ACACHE_LOAD(x) (x) ++#endif ++#endif + unsigned int adds; + unsigned int deleted; + unsigned int cleaned; +@@ -716,8 +731,9 @@ end_cleaning(acache_cleaner_t *cleaner, isc_event_t *event) { + "cleaned=%d cleaner_runs=%d overmem=%d " + "overmem_nocreates=%d nomem=%d", + acache, +- acache->stats.hits, acache->stats.misses, +- acache->stats.queries, ++ ACACHE_LOAD(acache->stats.hits), ++ ACACHE_LOAD(acache->stats.misses), ++ ACACHE_LOAD(acache->stats.queries), + acache->stats.adds, acache->stats.deleted, + acache->stats.cleaned, acache->stats.cleaner_runs, + acache->stats.overmem, acache->stats.overmem_nocreates, +@@ -1181,8 +1197,8 @@ dns_acache_attach(dns_acache_t *source, dns_acache_t **targetp) { + + void + dns_acache_countquerymiss(dns_acache_t *acache) { +- acache->stats.misses++; /* XXXSK danger: unlocked! */ +- acache->stats.queries++; /* XXXSK danger: unlocked! */ ++ ACACHE_INC(acache->stats.misses); ++ ACACHE_INC(acache->stats.queries); + } + + void +@@ -1529,8 +1545,8 @@ dns_acache_getentry(dns_acacheentry_t *entry, dns_zone_t **zonep, + } + } + +- entry->acache->stats.hits++; /* XXXMLG danger: unlocked! */ +- entry->acache->stats.queries++; ++ ACACHE_INC(entry->acache->stats.hits); ++ ACACHE_INC(entry->acache->stats.queries); + + ACACHE_UNLOCK(&acache->entrylocks[locknum], isc_rwlocktype_read); + +-- +2.23.0 + diff --git a/backport-0017-Obtain-a-lock-on-the-quota-structure.patch b/backport-0017-Obtain-a-lock-on-the-quota-structure.patch new file mode 100644 index 0000000..9625f79 --- /dev/null +++ b/backport-0017-Obtain-a-lock-on-the-quota-structure.patch @@ -0,0 +1,48 @@ +From be4ed416117bb155aae899cdeb644a8fd4e6457f Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Thu, 27 Aug 2020 18:53:06 +1000 +Subject: [PATCH] Obtain a lock on the quota structure. 
+ +WARNING: ThreadSanitizer: data race (pid=15228) + Read of size 4 at 0x7b5c00000444 by thread T8: + #0 ns_server_status /builds/isc-projects/bind9/bin/named/./server.c:10935:31 (named+0x514671) + #1 ns_control_docommand /builds/isc-projects/bind9/bin/named/control.c:263:12 (named+0x4e4726) + #2 control_recvmessage /builds/isc-projects/bind9/bin/named/controlconf.c:465:13 (named+0x4e9046) + #3 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x50845) + #4 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d799) + + Previous write of size 4 at 0x7b5c00000444 by thread T9 (mutexes: write M1082): + #0 isc_quota_release /builds/isc-projects/bind9/lib/isc/quota.c:73:13 (libisc.so.1107+0x3bf10) + #1 isc_quota_detach /builds/isc-projects/bind9/lib/isc/quota.c:111:2 (libisc.so.1107+0x3c12b) + #2 ns_client_endrequest /builds/isc-projects/bind9/bin/named/client.c:896:3 (named+0x4dcad1) + #3 exit_check /builds/isc-projects/bind9/bin/named/client.c:512:3 (named+0x4d570d) + #4 ns_client_detach /builds/isc-projects/bind9/bin/named/client.c:3687:8 (named+0x4d7732) + #5 query_find /builds/isc-projects/bind9/bin/named/query.c (named+0x4f8ff1) + #6 query_resume /builds/isc-projects/bind9/bin/named/query.c:4164:12 (named+0x509b38) + #7 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x50845) + #8 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d799) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/be4ed416117bb155aae899cdeb644a8fd4e6457f +--- + bin/named/server.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/bin/named/server.c b/bin/named/server.c +index 6aca0224c7..1ece13b31e 100644 +--- a/bin/named/server.c ++++ b/bin/named/server.c +@@ -10931,9 +10931,11 @@ ns_server_status(ns_server_t *server, isc_buffer_t **text) { + server->log_queries ? "ON" : "OFF"); + CHECK(putstr(text, line)); + ++ LOCK(&server->recursionquota.lock); + snprintf(line, sizeof(line), "recursive clients: %d/%d/%d\n", + server->recursionquota.used, server->recursionquota.soft, + server->recursionquota.max); ++ UNLOCK(&server->recursionquota.lock); + CHECK(putstr(text, line)); + + snprintf(line, sizeof(line), "tcp clients: %d/%d\n", +-- +2.23.0 + diff --git a/backport-0018-The-node-lock-was-released-too-early.patch b/backport-0018-The-node-lock-was-released-too-early.patch new file mode 100644 index 0000000..92dda3a --- /dev/null +++ b/backport-0018-The-node-lock-was-released-too-early.patch @@ -0,0 +1,69 @@ +From a1dcb73f677969d99df3ccff2acf4737e18a72b1 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Thu, 3 Sep 2020 12:53:53 +1000 +Subject: [PATCH] The node lock was released too early. + +NEGATIVE needs to be call with the node lock held. 
+ +WARNING: ThreadSanitizer: data race + Write of size 2 at 0x000000000001 by thread T1 (mutexes: write M1): + #0 mark_stale_header lib/dns/rbtdb.c:1802:21 + #1 add32 lib/dns/rbtdb.c:6559:5 + #2 addrdataset lib/dns/rbtdb.c:6975:12 + #3 dns_db_addrdataset lib/dns/db.c:783:10 + #4 cache_name lib/dns/resolver.c:5829:13 + #5 cache_message lib/dns/resolver.c:5926:14 + #6 resquery_response lib/dns/resolver.c:8618:12 + #7 dispatch lib/isc/task.c:1157:7 + #8 run lib/isc/task.c:1331:2 + + Previous read of size 2 at 0x000000000001 by thread T2: + #0 cache_findrdataset lib/dns/rbtdb.c:5932:6 + #1 dns_db_findrdataset lib/dns/db.c:739:10 + #2 query_addadditional2 bin/named/query.c:2196:11 + #3 additionaldata_ns lib/dns/./rdata/generic/ns_2.c:198:10 + #4 dns_rdata_additionaldata lib/dns/rdata.c:1246:2 + #5 dns_rdataset_additionaldata lib/dns/rdataset.c:629:12 + #6 query_addrdataset bin/named/query.c:2411:8 + #7 query_addrrset bin/named/query.c:2802:2 + #8 query_addbestns bin/named/query.c:3501:2 + #9 query_find bin/named/query.c:9165:4 + #10 query_resume bin/named/query.c:4164:12 + #11 dispatch lib/isc/task.c:1157:7 + #12 run lib/isc/task.c:1331:2 +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/a1dcb73f677969d99df3ccff2acf4737e18a72b1 +--- + lib/dns/rbtdb.c | 8 +++++--- + 1 file changed, 5 insertions(+), 3 deletions(-) + +diff --git a/lib/dns/rbtdb.c b/lib/dns/rbtdb.c +index 5dca432250..21bd85c322 100644 +--- a/lib/dns/rbtdb.c ++++ b/lib/dns/rbtdb.c +@@ -5924,10 +5924,10 @@ cache_findrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version, + } + } + +- NODE_UNLOCK(lock, locktype); +- +- if (found == NULL) ++ if (found == NULL) { ++ NODE_UNLOCK(lock, locktype); + return (ISC_R_NOTFOUND); ++ } + + if (NEGATIVE(found)) { + /* +@@ -5939,6 +5939,8 @@ cache_findrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version, + result = DNS_R_NCACHENXRRSET; + } + ++ NODE_UNLOCK(lock, locktype); ++ + update_cachestats(rbtdb, result); + + return (result); +-- +2.23.0 + diff --git a/backport-0019-Address-lock-order-inversion-between-the-keytable-an.patch b/backport-0019-Address-lock-order-inversion-between-the-keytable-an.patch new file mode 100644 index 0000000..9e46ef5 --- /dev/null +++ b/backport-0019-Address-lock-order-inversion-between-the-keytable-an.patch @@ -0,0 +1,108 @@ +From 7f6cddad0ca1b19c50a04a2f6568e9a9c4129504 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Fri, 4 Sep 2020 12:50:42 +1000 +Subject: [PATCH] Address lock-order-inversion between the keytable and the db + locks. 
+ + WARNING: ThreadSanitizer: lock-order-inversion (potential deadlock) + Cycle in lock order graph: M1 (0x000000000000) => M2 (0x000000000000) => M1 + + Mutex M2 acquired here while holding mutex M1 in thread T1: + #0 pthread_rwlock_rdlock + #1 isc_rwlock_lock lib/isc/rwlock.c:48:3 + #2 dns_keytable_find lib/dns/keytable.c:522:2 + #3 sync_keyzone lib/dns/zone.c:4560:12 + #4 dns_zone_synckeyzone lib/dns/zone.c:4635:11 + #5 mkey_refresh bin/named/server.c:15423:2 + #6 named_server_mkeys bin/named/server.c:15727:4 + #7 named_control_docommand bin/named/control.c:236:12 + #8 control_command bin/named/controlconf.c:365:17 + #9 dispatch lib/isc/task.c:1152:7 + #10 run lib/isc/task.c:1344:2 + + Mutex M1 previously acquired by the same thread here: + #0 pthread_rwlock_rdlock + #1 isc_rwlock_lock lib/isc/rwlock.c:48:3 + #2 resume_iteration lib/dns/rbtdb.c:9357:2 + #3 dbiterator_first lib/dns/rbtdb.c:9407:3 + #4 dns_dbiterator_first lib/dns/dbiterator.c:43:10 + #5 dns_rriterator_first lib/dns/rriterator.c:71:15 + #6 sync_keyzone lib/dns/zone.c:4543:16 + #7 dns_zone_synckeyzone lib/dns/zone.c:4635:11 + #8 mkey_refresh bin/named/server.c:15423:2 + #9 named_server_mkeys bin/named/server.c:15727:4 + #10 named_control_docommand bin/named/control.c:236:12 + #11 control_command bin/named/controlconf.c:365:17 + #12 dispatch lib/isc/task.c:1152:7 + #13 run lib/isc/task.c:1344:2 + + Mutex M1 acquired here while holding mutex M2 in thread T1: + #0 pthread_rwlock_rdlock + #1 isc_rwlock_lock lib/isc/rwlock.c:48:3 + #2 zone_find lib/dns/rbtdb.c:4029:2 + #3 dns_db_find lib/dns/db.c:500:11 + #4 addifmissing lib/dns/zone.c:4481:11 + #5 dns_keytable_forall lib/dns/keytable.c:786:4 + #6 sync_keyzone lib/dns/zone.c:4586:2 + #7 dns_zone_synckeyzone lib/dns/zone.c:4635:11 + #8 mkey_refresh bin/named/server.c:15423:2 + #9 named_server_mkeys bin/named/server.c:15727:4 + #10 named_control_docommand bin/named/control.c:236:12 + #11 control_command bin/named/controlconf.c:365:17 + #12 dispatch lib/isc/task.c:1152:7 + #13 run lib/isc/task.c:1344:2 + + Mutex M2 previously acquired by the same thread here: + #0 pthread_rwlock_rdlock + #1 isc_rwlock_lock lib/isc/rwlock.c:48:3 + #2 dns_keytable_forall lib/dns/keytable.c:770:2 + #3 sync_keyzone lib/dns/zone.c:4586:2 + #4 dns_zone_synckeyzone lib/dns/zone.c:4635:11 + #5 mkey_refresh bin/named/server.c:15423:2 + #6 named_server_mkeys bin/named/server.c:15727:4 + #7 named_control_docommand bin/named/control.c:236:12 + #8 control_command bin/named/controlconf.c:365:17 + #9 dispatch lib/isc/task.c:1152:7 + #10 run lib/isc/task.c:1344:2 + + Thread T1 (running) created by main thread at: + #0 pthread_create + #1 isc_thread_create lib/isc/pthreads/thread.c:73:8 + #2 isc_taskmgr_create lib/isc/task.c:1434:3 + #3 create_managers bin/named/main.c:915:11 + #4 setup bin/named/main.c:1223:11 + #5 main bin/named/main.c:1523:2 + + SUMMARY: ThreadSanitizer: lock-order-inversion (potential deadlock) in pthread_rwlock_rdlock + +(cherry picked from commit 9e5f83c4993310f9841a4eba90d4a84dba882727) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/7f6cddad0ca1b19c50a04a2f6568e9a9c4129504 +--- + lib/dns/zone.c | 9 +++++++-- + 1 file changed, 7 insertions(+), 2 deletions(-) + +diff --git a/lib/dns/zone.c b/lib/dns/zone.c +index 4d5f7fb9a5..e120dded9e 100644 +--- a/lib/dns/zone.c ++++ b/lib/dns/zone.c +@@ -4344,9 +4344,14 @@ sync_keyzone(dns_zone_t *zone, dns_db_t *db) { + goto failure; + } + +- if (rdataset->type != dns_rdatatype_keydata) ++ if (rdataset->type != 
dns_rdatatype_keydata) { + continue; +- ++ } ++ /* ++ * Release db wrlock to prevent LOR reports against ++ * dns_keytable_forall() call below. ++ */ ++ dns_rriterator_pause(&rrit); + result = dns_keytable_find(sr, rrname, &keynode); + if ((result != ISC_R_SUCCESS && + result != DNS_R_PARTIALMATCH) || +-- +2.23.0 + diff --git a/backport-0020-Pause-dbiterator-to-release-rwlock-to-prevent-lock-o.patch b/backport-0020-Pause-dbiterator-to-release-rwlock-to-prevent-lock-o.patch new file mode 100644 index 0000000..32780a6 --- /dev/null +++ b/backport-0020-Pause-dbiterator-to-release-rwlock-to-prevent-lock-o.patch @@ -0,0 +1,88 @@ +From 508bb0859c1d489e45d89e0118cb26d64ed1abc6 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Fri, 4 Sep 2020 15:23:13 +1000 +Subject: [PATCH] Pause dbiterator to release rwlock to prevent + lock-order-inversion. + + WARNING: ThreadSanitizer: lock-order-inversion (potential deadlock) + Cycle in lock order graph: M1 (0x000000000000) => M2 (0x000000000000) => M1 + + Mutex M2 acquired here while holding mutex M1 in thread T1: + #0 pthread_rwlock_rdlock + #1 isc_rwlock_lock lib/isc/rwlock.c:48:3 + #2 getsigningtime lib/dns/rbtdb.c:8198:2 + #3 dns_db_getsigningtime lib/dns/db.c:979:11 + #4 set_resigntime lib/dns/zone.c:3887:11 + #5 dns_zone_markdirty lib/dns/zone.c:11115:4 + #6 update_action lib/ns/update.c:3376:3 + #7 dispatch lib/isc/task.c:1152:7 + #8 run lib/isc/task.c:1344:2 + + Mutex M1 previously acquired by the same thread here: + #0 pthread_mutex_lock + #1 dns_zone_markdirty lib/dns/zone.c:11085:2 + #2 update_action lib/ns/update.c:3376:3 + #3 dispatch lib/isc/task.c:1152:7 + #4 run lib/isc/task.c:1344:2 + + Mutex M1 acquired here while holding mutex M2 in thread T2: + #0 pthread_mutex_lock + #1 zone_nsec3chain lib/dns/zone.c:8274:3 + #2 zone_maintenance lib/dns/zone.c:11052:4 + #3 zone_timer lib/dns/zone.c:14087:2 + #4 dispatch lib/isc/task.c:1152:7 + #5 run lib/isc/task.c:1344:2 + + Mutex M2 previously acquired by the same thread here: + #0 pthread_rwlock_rdlock + #1 isc_rwlock_lock lib/isc/rwlock.c:48:3 + #2 resume_iteration lib/dns/rbtdb.c:9357:2 + #3 dbiterator_next lib/dns/rbtdb.c:9647:3 + #4 dns_dbiterator_next lib/dns/dbiterator.c:87:10 + #5 zone_nsec3chain lib/dns/zone.c:8412:13 + #6 zone_maintenance lib/dns/zone.c:11052:4 + #7 zone_timer lib/dns/zone.c:14087:2 + #8 dispatch lib/isc/task.c:1152:7 + #9 run lib/isc/task.c:1344:2 + + Thread T1 (running) created by main thread at: + #0 pthread_create + #1 isc_thread_create lib/isc/pthreads/thread.c:73:8 + #2 isc_taskmgr_create lib/isc/task.c:1434:3 + #3 create_managers bin/named/main.c:915:11 + #4 setup bin/named/main.c:1223:11 + #5 main bin/named/main.c:1523:2 + + Thread T2 (running) created by main thread at: + #0 pthread_create + #1 isc_thread_create lib/isc/pthreads/thread.c:73:8 + #2 isc_taskmgr_create lib/isc/task.c:1434:3 + #3 create_managers bin/named/main.c:915:11 + #4 setup bin/named/main.c:1223:11 + #5 main bin/named/main.c:1523:2 + + SUMMARY: ThreadSanitizer: lock-order-inversion (potential deadlock) in pthread_rwlock_rdlock + +(cherry picked from commit 98025e15d0ea05bdac55fb4aa8e342bdf6febe1a) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/508bb0859c1d489e45d89e0118cb26d64ed1abc6 +--- + lib/dns/zone.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/lib/dns/zone.c b/lib/dns/zone.c +index 257f26780c..55eb1d72ec 100644 +--- a/lib/dns/zone.c ++++ b/lib/dns/zone.c +@@ -7722,6 +7722,8 @@ zone_nsec3chain(dns_zone_t *zone) { + * generated by 
dns__zone_updatesigs() calls later in this function. + */ + while (nsec3chain != NULL && nodes-- > 0 && signatures > 0) { ++ dns_dbiterator_pause(nsec3chain->dbiterator); ++ + LOCK_ZONE(zone); + nextnsec3chain = ISC_LIST_NEXT(nsec3chain, link); + +-- +2.23.0 + diff --git a/backport-0021-Address-lock-order-reversals-when-shutting-down-a-vi.patch b/backport-0021-Address-lock-order-reversals-when-shutting-down-a-vi.patch new file mode 100644 index 0000000..6991575 --- /dev/null +++ b/backport-0021-Address-lock-order-reversals-when-shutting-down-a-vi.patch @@ -0,0 +1,45 @@ +From a0b123a06e6cdbf931a973c91129776a90a6ce8b Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Mon, 7 Sep 2020 13:21:10 +1000 +Subject: [PATCH] Address lock order reversals when shutting down a view. + +Release view->lock before calling dns_resolver_shutdown, +dns_adb_shutdown, and dns_requestmgr_shutdown. +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/a0b123a06e6cdbf931a973c91129776a90a6ce8b +--- + lib/dns/view.c | 15 ++++++++++++--- + 1 file changed, 12 insertions(+), 3 deletions(-) + +diff --git a/lib/dns/view.c b/lib/dns/view.c +index 1726b43f9e..a7ba613990 100644 +--- a/lib/dns/view.c ++++ b/lib/dns/view.c +@@ -599,12 +599,21 @@ view_flushanddetach(dns_view_t **viewp, bool flush) { + dns_zone_t *mkzone = NULL, *rdzone = NULL; + + LOCK(&view->lock); +- if (!RESSHUTDOWN(view)) ++ if (!RESSHUTDOWN(view)) { ++ UNLOCK(&view->lock); + dns_resolver_shutdown(view->resolver); +- if (!ADBSHUTDOWN(view)) ++ LOCK(&view->lock); ++ } ++ if (!ADBSHUTDOWN(view)) { ++ UNLOCK(&view->lock); + dns_adb_shutdown(view->adb); +- if (!REQSHUTDOWN(view)) ++ LOCK(&view->lock); ++ } ++ if (!REQSHUTDOWN(view)) { ++ UNLOCK(&view->lock); + dns_requestmgr_shutdown(view->requestmgr); ++ LOCK(&view->lock); ++ } + if (view->acache != NULL) + dns_acache_shutdown(view->acache); + if (view->zonetable != NULL) { +-- +2.23.0 + diff --git a/backport-0022-Hold-qid-lock-when-calling-deref_portentry-as.patch b/backport-0022-Hold-qid-lock-when-calling-deref_portentry-as.patch new file mode 100644 index 0000000..77e6a9a --- /dev/null +++ b/backport-0022-Hold-qid-lock-when-calling-deref_portentry-as.patch @@ -0,0 +1,157 @@ +From 86d9d04fd8931e47c527cef08ae8ee89a695b707 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Thu, 22 Oct 2020 16:13:06 +1100 +Subject: [PATCH] Hold qid->lock when calling deref_portentry() as + +socket_search() need portentry to be unchanging. 
+ + WARNING: ThreadSanitizer: data race + Write of size 8 at 0x000000000001 by thread T1 (mutexes: write M1): + #0 deref_portentry lib/dns/dispatch.c:630 + #1 deactivate_dispsocket lib/dns/dispatch.c:861 + #2 udp_recv lib/dns/dispatch.c:1105 + #3 udp_exrecv lib/dns/dispatch.c:1028 + #4 dispatch lib/isc/task.c:1152 + #5 run lib/isc/task.c:1344 + #6 + + Previous read of size 8 at 0x000000000001 by thread T2 (mutexes: write M1, write M2): + #0 socket_search lib/dns/dispatch.c:661 + #1 get_dispsocket lib/dns/dispatch.c:744 + #2 dns_dispatch_addresponse lib/dns/dispatch.c:3120 + #3 resquery_send lib/dns/resolver.c:2467 + #4 fctx_query lib/dns/resolver.c:2217 + #5 fctx_try lib/dns/resolver.c:4245 + #6 fctx_timeout lib/dns/resolver.c:4570 + #7 dispatch lib/isc/task.c:1152 + #8 run lib/isc/task.c:1344 + #9 + +(cherry picked from commit 5c253c416d0bc0cce7606667c6703f44a98e9494) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/86d9d04fd8931e47c527cef08ae8ee89a695b707 +--- + lib/dns/dispatch.c | 41 ++++++++++++++++++----------------------- + 1 file changed, 18 insertions(+), 23 deletions(-) + +diff --git a/lib/dns/dispatch.c b/lib/dns/dispatch.c +index 7cfff94868..80d76b79c9 100644 +--- a/lib/dns/dispatch.c ++++ b/lib/dns/dispatch.c +@@ -599,34 +599,22 @@ new_portentry(dns_dispatch_t *disp, in_port_t port) { + } + + /*% +- * The caller must not hold the qid->lock. ++ * The caller must hold the qid->lock. + */ + static void + deref_portentry(dns_dispatch_t *disp, dispportentry_t **portentryp) { + dispportentry_t *portentry = *portentryp; +- dns_qid_t *qid; ++ *portentryp = NULL; + + REQUIRE(disp->port_table != NULL); + REQUIRE(portentry != NULL && portentry->refs > 0); + +- qid = DNS_QID(disp); +- LOCK(&qid->lock); +- portentry->refs--; +- +- if (portentry->refs == 0) { ++ if (--portentry->refs == 0) { + ISC_LIST_UNLINK(disp->port_table[portentry->port % + DNS_DISPATCH_PORTTABLESIZE], + portentry, link); + isc_mempool_put(disp->portpool, portentry); + } +- +- /* +- * Set '*portentryp' to NULL inside the lock so that +- * dispsock->portentry does not change in socket_search. +- */ +- *portentryp = NULL; +- +- UNLOCK(&qid->lock); + } + + /*% +@@ -764,9 +752,9 @@ get_dispsocket(dns_dispatch_t *disp, isc_sockaddr_t *dest, + if (result == ISC_R_SUCCESS) { + dispsock->socket = sock; + dispsock->host = *dest; +- dispsock->portentry = portentry; + dispsock->bucket = bucket; + LOCK(&qid->lock); ++ dispsock->portentry = portentry; + ISC_LIST_APPEND(qid->sock_table[bucket], dispsock, blink); + UNLOCK(&qid->lock); + *dispsockp = dispsock; +@@ -791,7 +779,7 @@ get_dispsocket(dns_dispatch_t *disp, isc_sockaddr_t *dest, + static void + destroy_dispsocket(dns_dispatch_t *disp, dispsocket_t **dispsockp) { + dispsocket_t *dispsock; +- dns_qid_t *qid; ++ dns_qid_t *qid = DNS_QID(disp); + + /* + * The dispatch must be locked. +@@ -803,19 +791,24 @@ destroy_dispsocket(dns_dispatch_t *disp, dispsocket_t **dispsockp) { + + disp->nsockets--; + dispsock->magic = 0; +- if (dispsock->portentry != NULL) ++ if (dispsock->portentry != NULL) { ++ /* socket_search() tests and dereferences portentry. 
*/ ++ LOCK(&qid->lock); + deref_portentry(disp, &dispsock->portentry); +- if (dispsock->socket != NULL) ++ UNLOCK(&qid->lock); ++ } ++ if (dispsock->socket != NULL) { + isc_socket_detach(&dispsock->socket); ++ } + if (ISC_LINK_LINKED(dispsock, blink)) { +- qid = DNS_QID(disp); + LOCK(&qid->lock); + ISC_LIST_UNLINK(qid->sock_table[dispsock->bucket], dispsock, + blink); + UNLOCK(&qid->lock); + } +- if (dispsock->task != NULL) ++ if (dispsock->task != NULL) { + isc_task_detach(&dispsock->task); ++ } + isc_mempool_put(disp->mgr->spool, dispsock); + + *dispsockp = NULL; +@@ -828,7 +821,7 @@ destroy_dispsocket(dns_dispatch_t *disp, dispsocket_t **dispsockp) { + static void + deactivate_dispsocket(dns_dispatch_t *disp, dispsocket_t *dispsock) { + isc_result_t result; +- dns_qid_t *qid; ++ dns_qid_t *qid = DNS_QID(disp); + + /* + * The dispatch must be locked. +@@ -840,14 +833,16 @@ deactivate_dispsocket(dns_dispatch_t *disp, dispsocket_t *dispsock) { + } + + INSIST(dispsock->portentry != NULL); ++ /* socket_search() tests and dereferences portentry. */ ++ LOCK(&qid->lock); + deref_portentry(disp, &dispsock->portentry); ++ UNLOCK(&qid->lock); + + if (disp->nsockets > DNS_DISPATCH_POOLSOCKS) + destroy_dispsocket(disp, &dispsock); + else { + result = isc_socket_close(dispsock->socket); + +- qid = DNS_QID(disp); + LOCK(&qid->lock); + ISC_LIST_UNLINK(qid->sock_table[dispsock->bucket], dispsock, + blink); +-- +2.23.0 + diff --git a/backport-0023-Lock-zone-before-calling-zone_namerd_tostr.patch b/backport-0023-Lock-zone-before-calling-zone_namerd_tostr.patch new file mode 100644 index 0000000..e98d2ff --- /dev/null +++ b/backport-0023-Lock-zone-before-calling-zone_namerd_tostr.patch @@ -0,0 +1,69 @@ +From b2bccc68a5d81ff543298ad7781202d97d31f9c4 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Tue, 10 Nov 2020 15:53:06 +1100 +Subject: [PATCH] Lock zone before calling zone_namerd_tostr() + + WARNING: ThreadSanitizer: data race + Read of size 8 at 0x000000000001 by thread T1: + #0 inline_raw lib/dns/zone.c:1375 + #1 zone_namerd_tostr lib/dns/zone.c:15316 + #2 dns_zone_name lib/dns/zone.c:15391 + #3 xfrin_log lib/dns/xfrin.c:1605 + #4 xfrin_destroy lib/dns/xfrin.c:1477 + #5 dns_xfrin_detach lib/dns/xfrin.c:739 + #6 xfrin_connect_done lib/dns/xfrin.c:970 + #7 tcpdnsconnect_cb netmgr/tcpdns.c:786 + #8 tcp_connect_cb netmgr/tcp.c:292 + #9 + #10 + + Previous write of size 8 at 0x000000000001 by thread T2 (mutexes: write M1): + #0 zone_shutdown lib/dns/zone.c:14462 + #1 dispatch lib/isc/task.c:1152 + #2 run lib/isc/task.c:1344 + #3 + + Location is heap block of size 2769 at 0x000000000013 allocated by thread T3: + #0 malloc + #1 default_memalloc lib/isc/mem.c:713 + #2 mem_get lib/isc/mem.c:622 + #3 mem_allocateunlocked lib/isc/mem.c:1268 + #4 isc___mem_allocate lib/isc/mem.c:1288 + #5 isc__mem_allocate lib/isc/mem.c:2453 + #6 isc___mem_get lib/isc/mem.c:1037 + #7 isc__mem_get lib/isc/mem.c:2432 + #8 dns_zone_create lib/dns/zone.c:984 + #9 configure_zone bin/named/server.c:6502 + #10 do_addzone bin/named/server.c:13391 + #11 named_server_changezone bin/named/server.c:13788 + #12 named_control_docommand bin/named/control.c:207 + #13 control_command bin/named/controlconf.c:392 + #14 dispatch lib/isc/task.c:1152 + #15 run lib/isc/task.c:1344 + #16 + +(cherry picked from commit 84f43903da742a4d1040ad0dd7f1d1dca23ac31c) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/b2bccc68a5d81ff543298ad7781202d97d31f9c4 +--- + lib/dns/zone.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git 
a/lib/dns/zone.c b/lib/dns/zone.c +index 30b71bdb8c..d3d9f32bcc 100644 +--- a/lib/dns/zone.c ++++ b/lib/dns/zone.c +@@ -14384,7 +14384,10 @@ void + dns_zone_name(dns_zone_t *zone, char *buf, size_t length) { + REQUIRE(DNS_ZONE_VALID(zone)); + REQUIRE(buf != NULL); ++ ++ LOCK_ZONE(zone); + zone_namerd_tostr(zone, buf, length); ++ UNLOCK_ZONE(zone); + } + + void +-- +2.23.0 + diff --git a/backport-0024-Address-TSAN-error-between-dns_rbt_findnode-and-subt.patch b/backport-0024-Address-TSAN-error-between-dns_rbt_findnode-and-subt.patch new file mode 100644 index 0000000..ee1c79e --- /dev/null +++ b/backport-0024-Address-TSAN-error-between-dns_rbt_findnode-and-subt.patch @@ -0,0 +1,163 @@ +From 62158e18821aa4b3ff3faeb8c2d33eb8b1ebd27b Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Tue, 13 Oct 2020 13:00:36 +1100 +Subject: [PATCH] Address TSAN error between dns_rbt_findnode() and + subtractrdataset(). + +Having dns_rbt_findnode() in previous_closest_nsec() check of +node->data is a optimisation that triggers a TSAN error with +subtractrdataset(). find_closest_nsec() still needs to check if +the NSEC record are active or not and look for a earlier NSEC records +if it isn't. Set DNS_RBTFIND_EMPTYDATA so node->data isn't referenced +without the node lock being held. + + WARNING: ThreadSanitizer: data race + Read of size 8 at 0x000000000001 by thread T1 (mutexes: read M1, read M2): + #0 dns_rbt_findnode lib/dns/rbt.c:1708 + #1 previous_closest_nsec lib/dns/rbtdb.c:3760 + #2 find_closest_nsec lib/dns/rbtdb.c:3942 + #3 zone_find lib/dns/rbtdb.c:4091 + #4 dns_db_findext lib/dns/db.c:536 + #5 query_lookup lib/ns/query.c:5582 + #6 ns__query_start lib/ns/query.c:5505 + #7 query_setup lib/ns/query.c:5229 + #8 ns_query_start lib/ns/query.c:11380 + #9 ns__client_request lib/ns/client.c:2166 + #10 processbuffer netmgr/tcpdns.c:230 + #11 dnslisten_readcb netmgr/tcpdns.c:309 + #12 read_cb netmgr/tcp.c:832 + #13 + #14 + + Previous write of size 8 at 0x000000000001 by thread T2 (mutexes: write M3): + #0 subtractrdataset lib/dns/rbtdb.c:7133 + #1 dns_db_subtractrdataset lib/dns/db.c:742 + #2 diff_apply lib/dns/diff.c:368 + #3 dns_diff_apply lib/dns/diff.c:459 + #4 do_one_tuple lib/dns/update.c:247 + #5 update_one_rr lib/dns/update.c:275 + #6 delete_if_action lib/dns/update.c:689 + #7 foreach_rr lib/dns/update.c:471 + #8 delete_if lib/dns/update.c:716 + #9 dns_update_signaturesinc lib/dns/update.c:1948 + #10 receive_secure_serial lib/dns/zone.c:15637 + #11 dispatch lib/isc/task.c:1152 + #12 run lib/isc/task.c:1344 + #13 + + Location is heap block of size 130 at 0x000000000028 allocated by thread T3: + #0 malloc + #1 default_memalloc lib/isc/mem.c:713 + #2 mem_get lib/isc/mem.c:622 + #3 mem_allocateunlocked lib/isc/mem.c:1268 + #4 isc___mem_allocate lib/isc/mem.c:1288 + #5 isc__mem_allocate lib/isc/mem.c:2453 + #6 isc___mem_get lib/isc/mem.c:1037 + #7 isc__mem_get lib/isc/mem.c:2432 + #8 create_node lib/dns/rbt.c:2239 + #9 dns_rbt_addnode lib/dns/rbt.c:1202 + #10 dns_rbtdb_create lib/dns/rbtdb.c:8668 + #11 dns_db_create lib/dns/db.c:118 + #12 receive_secure_db lib/dns/zone.c:16154 + #13 dispatch lib/isc/task.c:1152 + #14 run lib/isc/task.c:1344 + #15 + + Mutex M1 (0x000000000040) created at: + #0 pthread_rwlock_init + #1 isc_rwlock_init lib/isc/rwlock.c:39 + #2 dns_rbtdb_create lib/dns/rbtdb.c:8527 + #3 dns_db_create lib/dns/db.c:118 + #4 receive_secure_db lib/dns/zone.c:16154 + #5 dispatch lib/isc/task.c:1152 + #6 run lib/isc/task.c:1344 + #7 + + Mutex M2 (0x000000000044) created at: + #0 pthread_rwlock_init + 
#1 isc_rwlock_init lib/isc/rwlock.c:39 + #2 dns_rbtdb_create lib/dns/rbtdb.c:8600 + #3 dns_db_create lib/dns/db.c:118 + #4 receive_secure_db lib/dns/zone.c:16154 + #5 dispatch lib/isc/task.c:1152 + #6 run lib/isc/task.c:1344 + #7 + + Mutex M3 (0x000000000046) created at: + #0 pthread_rwlock_init + #1 isc_rwlock_init lib/isc/rwlock.c:39 + #2 dns_rbtdb_create lib/dns/rbtdb.c:8600 + #3 dns_db_create lib/dns/db.c:118 + #4 receive_secure_db lib/dns/zone.c:16154 + #5 dispatch lib/isc/task.c:1152 + #6 run lib/isc/task.c:1344 + #7 + + Thread T1 (running) created by main thread at: + #0 pthread_create + #1 isc_thread_create pthreads/thread.c:73 + #2 isc_nm_start netmgr/netmgr.c:232 + #3 create_managers bin/named/main.c:909 + #4 setup bin/named/main.c:1223 + #5 main bin/named/main.c:1523 + + Thread T2 (running) created by main thread at: + #0 pthread_create + #1 isc_thread_create pthreads/thread.c:73 + #2 isc_taskmgr_create lib/isc/task.c:1434 + #3 create_managers bin/named/main.c:915 + #4 setup bin/named/main.c:1223 + #5 main bin/named/main.c:1523 + + Thread T3 (running) created by main thread at: + #0 pthread_create + #1 isc_thread_create pthreads/thread.c:73 + #2 isc_taskmgr_create lib/isc/task.c:1434 + #3 create_managers bin/named/main.c:915 + #4 setup bin/named/main.c:1223 + #5 main bin/named/main.c:1523 + + SUMMARY: ThreadSanitizer: data race lib/dns/rbt.c:1708 in dns_rbt_findnode + +(cherry picked from commit 244f84a84ba7e9551edb374911438e0c36d375cc) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/62158e18821aa4b3ff3faeb8c2d33eb8b1ebd27b +--- + lib/dns/rbtdb.c | 13 ++++++------- + 1 file changed, 6 insertions(+), 7 deletions(-) + +diff --git a/lib/dns/rbtdb.c b/lib/dns/rbtdb.c +index 3b75cadcf1..3a60bcf811 100644 +--- a/lib/dns/rbtdb.c ++++ b/lib/dns/rbtdb.c +@@ -3887,11 +3887,9 @@ previous_closest_nsec(dns_rdatatype_t type, rbtdb_search_t *search, + if (result != ISC_R_SUCCESS) + return (result); + nsecnode = NULL; +- result = dns_rbt_findnode(search->rbtdb->nsec, +- target, NULL, +- &nsecnode, nsecchain, +- DNS_RBTFIND_NOOPTIONS, +- NULL, NULL); ++ result = dns_rbt_findnode( ++ search->rbtdb->nsec, target, NULL, &nsecnode, ++ nsecchain, DNS_RBTFIND_EMPTYDATA, NULL, NULL); + if (result == ISC_R_SUCCESS) { + /* + * Since this was the first loop, finding the +@@ -3936,9 +3934,10 @@ previous_closest_nsec(dns_rdatatype_t type, rbtdb_search_t *search, + *nodep = NULL; + result = dns_rbt_findnode(search->rbtdb->tree, target, NULL, + nodep, &search->chain, +- DNS_RBTFIND_NOOPTIONS, NULL, NULL); +- if (result == ISC_R_SUCCESS) ++ DNS_RBTFIND_EMPTYDATA, NULL, NULL); ++ if (result == ISC_R_SUCCESS) { + return (result); ++ } + + /* + * There should always be a node in the main tree with the +-- +2.23.0 + diff --git a/backport-0025-Address-data-race-in-dns_stats_detach-over-reference.patch b/backport-0025-Address-data-race-in-dns_stats_detach-over-reference.patch new file mode 100644 index 0000000..aedc49b --- /dev/null +++ b/backport-0025-Address-data-race-in-dns_stats_detach-over-reference.patch @@ -0,0 +1,57 @@ +From fdb893ede19df7649afd6c6c6ee7e6e666828d10 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Thu, 15 Oct 2020 16:48:24 +1100 +Subject: [PATCH] Address data race in dns_stats_detach over references + + WARNING: ThreadSanitizer: data race + Write of size 4 at 0x000000000001 by thread T1 (mutexes: write M1): + #0 dns_stats_detach lib/dns/stats.c:115:19 + #1 destroy lib/dns/view.c:527:3 + #2 dns_view_weakdetach lib/dns/view.c:704:3 + #3 zone_free 
lib/dns/zone.c:1149:3 + #4 zone_shutdown lib/dns/zone.c:13123:3 + #5 dispatch lib/isc/task.c:1157:7 + #6 run lib/isc/task.c:1331:2 + + Previous read of size 4 at 0x000000000001 by thread T2: + #0 dns_stats_detach lib/dns/stats.c:118:13 + #1 destroy lib/dns/view.c:527:3 + #2 dns_view_weakdetach lib/dns/view.c:704:3 + #3 zone_free lib/dns/zone.c:1152:3 + #4 zone_shutdown lib/dns/zone.c:13123:3 + #5 dispatch lib/isc/task.c:1157:7 + #6 run lib/isc/task.c:1331:2 +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/fdb893ede19df7649afd6c6c6ee7e6e666828d10 +--- + lib/dns/stats.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/lib/dns/stats.c b/lib/dns/stats.c +index 1473371c64..a0184ecf84 100644 +--- a/lib/dns/stats.c ++++ b/lib/dns/stats.c +@@ -104,6 +104,7 @@ dns_stats_attach(dns_stats_t *stats, dns_stats_t **statsp) { + + void + dns_stats_detach(dns_stats_t **statsp) { ++ unsigned int references; + dns_stats_t *stats; + + REQUIRE(statsp != NULL && DNS_STATS_VALID(*statsp)); +@@ -112,10 +113,10 @@ dns_stats_detach(dns_stats_t **statsp) { + *statsp = NULL; + + LOCK(&stats->lock); +- stats->references--; ++ references = --stats->references; + UNLOCK(&stats->lock); + +- if (stats->references == 0) { ++ if (references == 0) { + isc_stats_detach(&stats->counters); + DESTROYLOCK(&stats->lock); + isc_mem_putanddetach(&stats->mctx, stats, sizeof(*stats)); +-- +2.23.0 + diff --git a/backport-0026-Lock-check-of-DNS_ZONEFLG_EXITING-flag.patch b/backport-0026-Lock-check-of-DNS_ZONEFLG_EXITING-flag.patch new file mode 100644 index 0000000..c7d4b8d --- /dev/null +++ b/backport-0026-Lock-check-of-DNS_ZONEFLG_EXITING-flag.patch @@ -0,0 +1,56 @@ +From 042e1e3a44fbe087c41810ee8ee74d7c5cd973d6 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Fri, 16 Oct 2020 10:42:52 +1100 +Subject: [PATCH] Lock check of DNS_ZONEFLG_EXITING flag + + WARNING: ThreadSanitizer: data race + Read of size 4 at 0x000000000001 by thread T1 (mutexes: write M1): + #0 dns_zone_refresh lib/dns/zone.c:10447 + #1 dns_zone_notifyreceive2 lib/dns/zone.c:13646 + #2 dns_zone_notifyreceive2 lib/dns/zone.c:13497 + #3 ns_notify_start bin/named/notify.c:150 + #4 client_request bin/named/client.c:3137 + #5 dispatch lib/isc/task.c:1157 + #6 run lib/isc/task.c:1331 + #7 + + Previous write of size 4 at 0x000000000001 by thread T2 (mutexes: write M2): + #0 dns_zone_refresh lib/dns/zone.c:10466 + #1 zone_maintenance lib/dns/zone.c:10236 + #2 zone_timer lib/dns/zone.c:13136 + #3 dispatch lib/isc/task.c:1157 + #4 run lib/isc/task.c:1331 + #5 +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/042e1e3a44fbe087c41810ee8ee74d7c5cd973d6 +--- + lib/dns/zone.c | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/lib/dns/zone.c b/lib/dns/zone.c +index d3d9f32bcc..f09805f322 100644 +--- a/lib/dns/zone.c ++++ b/lib/dns/zone.c +@@ -10462,15 +10462,17 @@ dns_zone_refresh(dns_zone_t *zone) { + + REQUIRE(DNS_ZONE_VALID(zone)); + +- if (DNS_ZONE_FLAG(zone, DNS_ZONEFLG_EXITING)) ++ LOCK_ZONE(zone); ++ if (DNS_ZONE_FLAG(zone, DNS_ZONEFLG_EXITING)) { ++ UNLOCK_ZONE(zone); + return; ++ } + + /* + * Set DNS_ZONEFLG_REFRESH so that there is only one refresh operation + * in progress at a time. 
+ */ + +- LOCK_ZONE(zone); + oldflags = zone->flags; + if (zone->masterscnt == 0) { + DNS_ZONE_SETFLAG(zone, DNS_ZONEFLG_NOMASTERS); +-- +2.23.0 + diff --git a/backport-0027-Fix-locking-for-LMDB-0.9.26.patch b/backport-0027-Fix-locking-for-LMDB-0.9.26.patch new file mode 100644 index 0000000..6343675 --- /dev/null +++ b/backport-0027-Fix-locking-for-LMDB-0.9.26.patch @@ -0,0 +1,313 @@ +From 25818ac81fbc5665b101dead4b406a4dfe2d4486 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Micha=C5=82=20K=C4=99pie=C5=84?= +Date: Fri, 10 Jul 2020 11:29:18 +0200 +Subject: [PATCH] Fix locking for LMDB 0.9.26 + +When "rndc reconfig" is run, named first configures a fresh set of views +and then tears down the old views. Consider what happens for a single +view with LMDB enabled; "envA" is the pointer to the LMDB environment +used by the original/old version of the view, "envB" is the pointer to +the same LMDB environment used by the new version of that view: + + 1. mdb_env_open(envA) is called when the view is first created. + 2. "rndc reconfig" is called. + 3. mdb_env_open(envB) is called for the new instance of the view. + 4. mdb_env_close(envA) is called for the old instance of the view. + +This seems to have worked so far. However, an upstream change [1] in +LMDB which will be part of its 0.9.26 release prevents the above +sequence of calls from working as intended because the locktable mutexes +will now get destroyed by the mdb_env_close() call in step 4 above, +causing any subsequent mdb_txn_begin() calls to fail (because all of the +above steps are happening within a single named process). + +Preventing the above scenario from happening would require either +redesigning the way we use LMDB in BIND, which is not something we can +easily backport, or redesigning the way BIND carries out its +reconfiguration process, which would be an even more severe change. + +To work around the problem, set MDB_NOLOCK when calling mdb_env_open() +to stop LMDB from controlling concurrent access to the database and do +the necessary locking in named instead. Reuse the view->new_zone_lock +mutex for this purpose to prevent the need for modifying struct dns_view +(which would necessitate library API version bumps). Drop use of +MDB_NOTLS as it is made redundant by MDB_NOLOCK: MDB_NOTLS only affects +where LMDB reader locktable slots are stored while MDB_NOLOCK prevents +the reader locktable from being used altogether. 
+ +[1] https://git.openldap.org/openldap/openldap/-/commit/2fd44e325195ae81664eb5dc36e7d265927c5ebc + +(cherry picked from commit 53120279b57e25b6462ef3ac4ef9c205a4e9192b) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/25818ac81fbc5665b101dead4b406a4dfe2d4486 +--- + .gitlab-ci.yml | 5 +--- + bin/named/server.c | 61 +++++++++++++++++++++++++++++++++----- + lib/dns/include/dns/view.h | 7 +---- + 3 files changed, 55 insertions(+), 18 deletions(-) + +diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml +index ea312c36de..bcea3c38e0 100644 +--- a/.gitlab-ci.yml ++++ b/.gitlab-ci.yml +@@ -1099,8 +1099,6 @@ clang:freebsd11.4:amd64: + variables: + CFLAGS: "${CFLAGS_COMMON}" + USER: gitlab-runner +- # Temporarily disable LMDB support [GL #1976] +- EXTRA_CONFIGURE: "--without-lmdb" + <<: *freebsd_amd64 + <<: *build_job + +@@ -1126,8 +1124,7 @@ unit:clang:freebsd11.4:amd64: + clang:freebsd12.1:amd64: + variables: + CFLAGS: "${CFLAGS_COMMON}" +- # Temporarily disable LMDB support [GL #1976] +- EXTRA_CONFIGURE: "--enable-dnstap --without-lmdb" ++ EXTRA_CONFIGURE: "--enable-dnstap" + USER: gitlab-runner + <<: *freebsd_amd64 + <<: *build_job +diff --git a/bin/named/server.c b/bin/named/server.c +index a2fa2c864c..5fd9fc1176 100644 +--- a/bin/named/server.c ++++ b/bin/named/server.c +@@ -6797,6 +6797,8 @@ count_newzones(dns_view_t *view, ns_cfgctx_t *nzcfg, int *num_zonesp) { + "for view '%s'", + view->new_zone_db, view->name); + ++ LOCK(&view->new_zone_lock); ++ + CHECK(nzd_count(view, &n)); + + *num_zonesp = n; +@@ -6811,6 +6813,8 @@ count_newzones(dns_view_t *view, ns_cfgctx_t *nzcfg, int *num_zonesp) { + if (result != ISC_R_SUCCESS) + *num_zonesp = 0; + ++ UNLOCK(&view->new_zone_lock); ++ + return (ISC_R_SUCCESS); + } + +@@ -7116,6 +7120,8 @@ typedef isc_result_t (*newzone_cfg_cb_t)(const cfg_obj_t *zconfig, + * Immediately interrupt processing if an error is encountered while + * transforming NZD data into a zone configuration object or if "callback" + * returns an error. ++ * ++ * Caller must hold 'view->new_zone_lock'. 
+ */ + static isc_result_t + for_all_newzone_cfgs(newzone_cfg_cb_t callback, cfg_obj_t *config, +@@ -7228,8 +7234,11 @@ configure_newzones(dns_view_t *view, cfg_obj_t *config, cfg_obj_t *vconfig, + return (ISC_R_SUCCESS); + } + ++ LOCK(&view->new_zone_lock); ++ + result = nzd_open(view, MDB_RDONLY, &txn, &dbi); + if (result != ISC_R_SUCCESS) { ++ UNLOCK(&view->new_zone_lock); + return (ISC_R_SUCCESS); + } + +@@ -7256,6 +7265,9 @@ configure_newzones(dns_view_t *view, cfg_obj_t *config, cfg_obj_t *vconfig, + } + + (void) nzd_close(&txn, false); ++ ++ UNLOCK(&view->new_zone_lock); ++ + return (result); + } + +@@ -7277,6 +7289,8 @@ get_newzone_config(dns_view_t *view, const char *zonename, + + INSIST(zoneconfig != NULL && *zoneconfig == NULL); + ++ LOCK(&view->new_zone_lock); ++ + CHECK(nzd_open(view, MDB_RDONLY, &txn, &dbi)); + + isc_log_write(ns_g_lctx, +@@ -7310,6 +7324,8 @@ get_newzone_config(dns_view_t *view, const char *zonename, + cleanup: + (void) nzd_close(&txn, false); + ++ UNLOCK(&view->new_zone_lock); ++ + if (zoneconf != NULL) { + cfg_obj_destroy(ns_g_addparser, &zoneconf); + } +@@ -11638,8 +11654,6 @@ nzd_save(MDB_txn **txnp, MDB_dbi dbi, dns_zone_t *zone, + + nzd_setkey(&key, dns_zone_getorigin(zone), namebuf, sizeof(namebuf)); + +- LOCK(&view->new_zone_lock); +- + if (zconfig == NULL) { + /* We're deleting the zone from the database */ + status = mdb_del(*txnp, dbi, &key, NULL); +@@ -11739,8 +11753,6 @@ nzd_save(MDB_txn **txnp, MDB_dbi dbi, dns_zone_t *zone, + } + *txnp = NULL; + +- UNLOCK(&view->new_zone_lock); +- + if (text != NULL) { + isc_buffer_free(&text); + } +@@ -11748,6 +11760,11 @@ nzd_save(MDB_txn **txnp, MDB_dbi dbi, dns_zone_t *zone, + return (result); + } + ++/* ++ * Check whether the new zone database for 'view' can be opened for writing. ++ * ++ * Caller must hold 'view->new_zone_lock'. ++ */ + static isc_result_t + nzd_writable(dns_view_t *view) { + isc_result_t result = ISC_R_SUCCESS; +@@ -11779,6 +11796,11 @@ nzd_writable(dns_view_t *view) { + return (result); + } + ++/* ++ * Open the new zone database for 'view' and start a transaction for it. ++ * ++ * Caller must hold 'view->new_zone_lock'. ++ */ + static isc_result_t + nzd_open(dns_view_t *view, unsigned int flags, MDB_txn **txnp, MDB_dbi *dbi) { + int status; +@@ -11909,6 +11931,13 @@ nzd_env_reopen(dns_view_t *view) { + return (result); + } + ++/* ++ * If 'commit' is true, commit the new zone database transaction pointed to by ++ * 'txnp'; otherwise, abort that transaction. ++ * ++ * Caller must hold 'view->new_zone_lock' for the view that the transaction ++ * pointed to by 'txnp' was started for. ++ */ + static isc_result_t + nzd_close(MDB_txn **txnp, bool commit) { + isc_result_t result = ISC_R_SUCCESS; +@@ -11931,6 +11960,12 @@ nzd_close(MDB_txn **txnp, bool commit) { + return (result); + } + ++/* ++ * Count the zones configured in the new zone database for 'view' and store the ++ * result in 'countp'. ++ * ++ * Caller must hold 'view->new_zone_lock'. ++ */ + static isc_result_t + nzd_count(dns_view_t *view, int *countp) { + isc_result_t result; +@@ -11979,6 +12014,8 @@ migrate_nzf(dns_view_t *view) { + MDB_val key, data; + ns_dzarg_t dzarg; + ++ LOCK(&view->new_zone_lock); ++ + /* + * If NZF file doesn't exist, or NZD DB exists and already + * has data, return without attempting migration. 
+@@ -12122,6 +12159,8 @@ migrate_nzf(dns_view_t *view) { + result = nzd_close(&txn, commit); + } + ++ UNLOCK(&view->new_zone_lock); ++ + if (text != NULL) { + isc_buffer_free(&text); + } +@@ -12325,6 +12364,7 @@ do_addzone(ns_server_t *server, ns_cfgctx_t *cfg, dns_view_t *view, + MDB_dbi dbi; + + UNUSED(zoneconf); ++ LOCK(&view->new_zone_lock); + #endif /* HAVE_LMDB */ + + /* Zone shouldn't already exist */ +@@ -12465,6 +12505,7 @@ do_addzone(ns_server_t *server, ns_cfgctx_t *cfg, dns_view_t *view, + #else /* HAVE_LMDB */ + if (txn != NULL) + (void) nzd_close(&txn, false); ++ UNLOCK(&view->new_zone_lock); + #endif /* HAVE_LMDB */ + + if (zone != NULL) +@@ -12488,6 +12529,7 @@ do_modzone(ns_server_t *server, ns_cfgctx_t *cfg, dns_view_t *view, + #else /* HAVE_LMDB */ + MDB_txn *txn = NULL; + MDB_dbi dbi; ++ LOCK(&view->new_zone_lock); + #endif /* HAVE_LMDB */ + + /* Zone must already exist */ +@@ -12667,6 +12709,7 @@ do_modzone(ns_server_t *server, ns_cfgctx_t *cfg, dns_view_t *view, + #else /* HAVE_LMDB */ + if (txn != NULL) + (void) nzd_close(&txn, false); ++ UNLOCK(&view->new_zone_lock); + #endif /* HAVE_LMDB */ + + if (zone != NULL) +@@ -12816,6 +12859,7 @@ rmzone(isc_task_t *task, isc_event_t *event) { + if (added && cfg != NULL) { + #ifdef HAVE_LMDB + /* Make sure we can open the NZD database */ ++ LOCK(&view->new_zone_lock); + result = nzd_open(view, 0, &txn, &dbi); + if (result != ISC_R_SUCCESS) { + isc_log_write(ns_g_lctx, NS_LOGCATEGORY_GENERAL, +@@ -12834,6 +12878,11 @@ rmzone(isc_task_t *task, isc_event_t *event) { + "delete zone configuration: %s", + isc_result_totext(result)); + } ++ ++ if (txn != NULL) { ++ (void)nzd_close(&txn, false); ++ } ++ UNLOCK(&view->new_zone_lock); + #else + result = delete_zoneconf(view, cfg->add_parser, + cfg->nzf_config, +@@ -12926,10 +12975,6 @@ rmzone(isc_task_t *task, isc_event_t *event) { + } + } + +-#ifdef HAVE_LMDB +- if (txn != NULL) +- (void) nzd_close(&txn, false); +-#endif + if (raw != NULL) + dns_zone_detach(&raw); + dns_zone_detach(&zone); +diff --git a/lib/dns/include/dns/view.h b/lib/dns/include/dns/view.h +index c849dec154..09a9725de1 100644 +--- a/lib/dns/include/dns/view.h ++++ b/lib/dns/include/dns/view.h +@@ -240,12 +240,7 @@ struct dns_view { + + #ifdef HAVE_LMDB + #include +-/* +- * MDB_NOTLS is used to prevent problems after configuration is reloaded, due +- * to the way LMDB's use of thread-local storage (TLS) interacts with the BIND9 +- * thread model. +- */ +-#define DNS_LMDB_COMMON_FLAGS (MDB_CREATE | MDB_NOSUBDIR | MDB_NOTLS) ++#define DNS_LMDB_COMMON_FLAGS (MDB_CREATE | MDB_NOSUBDIR | MDB_NOLOCK) + #ifndef __OpenBSD__ + #define DNS_LMDB_FLAGS (DNS_LMDB_COMMON_FLAGS) + #else /* __OpenBSD__ */ +-- +2.23.0 + diff --git a/backport-0028-Correctly-encode-LOC-records-with-non-integer-negati.patch b/backport-0028-Correctly-encode-LOC-records-with-non-integer-negati.patch new file mode 100644 index 0000000..6932aec --- /dev/null +++ b/backport-0028-Correctly-encode-LOC-records-with-non-integer-negati.patch @@ -0,0 +1,48 @@ +From 19b95a5f372b0b621a16699d7e6ebb2717a31314 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Sun, 23 Aug 2020 01:38:17 +1000 +Subject: [PATCH] Correctly encode LOC records with non integer negative + altitudes. 
+ +(cherry picked from commit 337cc878fa5c6a93664b402a5fb7ee06d9b3a0f2) +Conflict: modify seg2 +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/19b95a5f372b0b621a16699d7e6ebb2717a31314 +--- + lib/dns/rdata/generic/loc_29.c | 7 +++++++ + 1 file changed, 7 insertions(+) + +diff --git a/lib/dns/rdata/generic/loc_29.c b/lib/dns/rdata/generic/loc_29.c +index 0ef4360ffe..1e29d92ba9 100644 +--- a/lib/dns/rdata/generic/loc_29.c ++++ b/lib/dns/rdata/generic/loc_29.c +@@ -27,6 +27,7 @@ fromtext_loc(ARGS_FROMTEXT) { + unsigned char version; + bool east = false; + bool north = false; ++ bool negative = false; + long tmp; + long m; + long cm; +@@ -280,6 +281,9 @@ fromtext_loc(ARGS_FROMTEXT) { + */ + RETERR(isc_lex_getmastertoken(lexer, &token, isc_tokentype_string, + false)); ++ if (DNS_AS_STR(token)[0] == '-') { ++ negative = true; ++ } + m = strtol(DNS_AS_STR(token), &e, 10); + if (*e != 0 && *e != '.' && *e != 'm') + RETTOK(DNS_R_SYNTAX); +@@ -324,6 +328,9 @@ fromtext_loc(ARGS_FROMTEXT) { + /* + * Adjust base. + */ ++ if (m < 0 || negative) { ++ cm = -cm; ++ } + altitude = m + 100000; + altitude *= 100; + altitude += cm; +-- +2.23.0 + diff --git a/backport-0029-isc_ratelimiter-needs-to-hold-a-reference-to-its-tas.patch b/backport-0029-isc_ratelimiter-needs-to-hold-a-reference-to-its-tas.patch new file mode 100644 index 0000000..c057210 --- /dev/null +++ b/backport-0029-isc_ratelimiter-needs-to-hold-a-reference-to-its-tas.patch @@ -0,0 +1,45 @@ +From a7da8f84caabb975ac0e21ca3c0816bca81413e5 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Wed, 19 Aug 2020 18:45:38 +1000 +Subject: [PATCH] isc_ratelimiter needs to hold a reference to its task + +to prevent the task subsystem shutting down before the +ratelimiter is freed. + +(cherry picked from commit b8e4b6d30325168bee19e1f05e286c5d3592c4ff) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/a7da8f84caabb975ac0e21ca3c0816bca81413e5 +--- + lib/isc/ratelimiter.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/lib/isc/ratelimiter.c b/lib/isc/ratelimiter.c +index 3fd211f633..1fefe9b25a 100644 +--- a/lib/isc/ratelimiter.c ++++ b/lib/isc/ratelimiter.c +@@ -252,11 +252,14 @@ isc_ratelimiter_shutdown(isc_ratelimiter_t *rl) { + (void)isc_timer_reset(rl->timer, isc_timertype_inactive, + NULL, NULL, false); + while ((ev = ISC_LIST_HEAD(rl->pending)) != NULL) { ++ task = ev->ev_sender; + ISC_LIST_UNLINK(rl->pending, ev, ev_ratelink); + ev->ev_attributes |= ISC_EVENTATTR_CANCELED; + task = ev->ev_sender; + isc_task_send(task, &ev); + } ++ task = NULL; ++ isc_task_attach(rl->task, &task); + isc_timer_detach(&rl->timer); + + /* +@@ -276,6 +279,7 @@ ratelimiter_shutdowncomplete(isc_task_t *task, isc_event_t *event) { + UNUSED(task); + + isc_ratelimiter_detach(&rl); ++ isc_task_detach(&task); + } + + static void +-- +2.23.0 + diff --git a/backport-0030-Lock-access-to-flags-in-dns__zone_loadpending.patch b/backport-0030-Lock-access-to-flags-in-dns__zone_loadpending.patch new file mode 100644 index 0000000..ce07966 --- /dev/null +++ b/backport-0030-Lock-access-to-flags-in-dns__zone_loadpending.patch @@ -0,0 +1,46 @@ +From d8fa989c2934398c9cd665aea1a6d34616dfcb1a Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Thu, 20 Aug 2020 10:55:58 +1000 +Subject: [PATCH] Lock access to flags in dns__zone_loadpending + +================== +WARNING: ThreadSanitizer: data race (pid=1938) + Read of size 4 at 0x7b7800000dd4 by main thread: + #0 dns__zone_loadpending /builds/isc-projects/bind9/lib/dns/zone.c:2308:10 
(libdns.so.1110+0x1da342) + #1 asyncload_zone /builds/isc-projects/bind9/lib/dns/tests/zt_test.c:204:9 (zt_test+0x4b8303) + #2 (libcmocka.so.0+0x50d8) + #3 __libc_start_main /build/glibc-vjB4T1/glibc-2.28/csu/../csu/libc-start.c:308:16 (libc.so.6+0x2409a) + + Previous write of size 4 at 0x7b7800000dd4 by thread T16 (mutexes: write M2181): + #0 zone_asyncload /builds/isc-projects/bind9/lib/dns/zone.c:2237:3 (libdns.so.1110+0x1da08d) + #1 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x50845) + #2 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d799) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/d8fa989c2934398c9cd665aea1a6d34616dfcb1a +--- + lib/dns/zone.c | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/lib/dns/zone.c b/lib/dns/zone.c +index c8e880f4c1..97e24c7e7f 100644 +--- a/lib/dns/zone.c ++++ b/lib/dns/zone.c +@@ -2303,9 +2303,14 @@ dns_zone_asyncload2(dns_zone_t *zone, dns_zt_zoneloaded_t done, void * arg, + + bool + dns__zone_loadpending(dns_zone_t *zone) { ++ bool result; ++ + REQUIRE(DNS_ZONE_VALID(zone)); + +- return (DNS_ZONE_FLAG(zone, DNS_ZONEFLG_LOADPENDING)); ++ LOCK_ZONE(zone); ++ result = DNS_ZONE_FLAG(zone, DNS_ZONEFLG_LOADPENDING) != 0; ++ UNLOCK_ZONE(zone); ++ return (result); + } + + isc_result_t +-- +2.23.0 + diff --git a/backport-0031-Update-init_count-atomically-to-silence-tsan-errors.patch b/backport-0031-Update-init_count-atomically-to-silence-tsan-errors.patch new file mode 100644 index 0000000..0b1f151 --- /dev/null +++ b/backport-0031-Update-init_count-atomically-to-silence-tsan-errors.patch @@ -0,0 +1,45 @@ +From ca0c1e5b4bbd57060321d7afe2d606227fbe358a Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Thu, 20 Aug 2020 11:24:05 +1000 +Subject: [PATCH] Update 'init_count' atomically to silence tsan errors. 
+ +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/ca0c1e5b4bbd57060321d7afe2d606227fbe358a +--- + lib/dns/rbtdb.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/lib/dns/rbtdb.c b/lib/dns/rbtdb.c +index baf764174a..88c39bf714 100644 +--- a/lib/dns/rbtdb.c ++++ b/lib/dns/rbtdb.c +@@ -6839,7 +6839,7 @@ addrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version, + newheader->attributes |= RDATASET_ATTR_ZEROTTL; + newheader->noqname = NULL; + newheader->closest = NULL; +- newheader->count = init_count++; ++ newheader->count = isc_atomic_xadd((int32_t*)&init_count, 1); + newheader->trust = rdataset->trust; + newheader->additional_auth = NULL; + newheader->additional_glue = NULL; +@@ -7035,7 +7035,7 @@ subtractrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version, + newheader->trust = 0; + newheader->noqname = NULL; + newheader->closest = NULL; +- newheader->count = init_count++; ++ newheader->count = isc_atomic_xadd((int32_t*)&init_count, 1); + newheader->additional_auth = NULL; + newheader->additional_glue = NULL; + newheader->last_used = 0; +@@ -7481,7 +7481,7 @@ loading_addrdataset(void *arg, dns_name_t *name, dns_rdataset_t *rdataset) { + newheader->serial = 1; + newheader->noqname = NULL; + newheader->closest = NULL; +- newheader->count = init_count++; ++ newheader->count = isc_atomic_xadd((int32_t*)&init_count, 1); + newheader->additional_auth = NULL; + newheader->additional_glue = NULL; + newheader->last_used = 0; +-- +2.23.0 + diff --git a/backport-0032-dig-bufsize-0-failed-to-disable-EDNS-as-a-side-effec.patch b/backport-0032-dig-bufsize-0-failed-to-disable-EDNS-as-a-side-effec.patch new file mode 100644 index 0000000..95837e1 --- /dev/null +++ b/backport-0032-dig-bufsize-0-failed-to-disable-EDNS-as-a-side-effec.patch @@ -0,0 +1,257 @@ +From 46dc1c34f9a4ef533e8202086b427a73c20a3cc7 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Wed, 29 Jul 2020 12:34:54 +1000 +Subject: [PATCH] dig +bufsize=0 failed to disable EDNS as a side effect. 
+ +(cherry picked from commit 0dc04cb901197d10a7ce90fd4bc0ef228a7b3171) +Conflict: delete CHANGES +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/46dc1c34f9a4ef533e8202086b427a73c20a3cc7 +--- + CHANGES | 3 +++ + bin/dig/dig.c | 39 ++++++++++++++++++++----------- + bin/dig/dig.docbook | 14 ++++++----- + bin/dig/dighost.c | 12 ++++++---- + bin/dig/include/dig/dig.h | 6 ++++- + bin/dig/nslookup.c | 1 - + bin/tests/system/digdelv/tests.sh | 28 +++++++++++++++++++++- + 7 files changed, 75 insertions(+), 28 deletions(-) + +diff --git a/bin/dig/dig.c b/bin/dig/dig.c +index 996cbb9495..32e5c67063 100644 +--- a/bin/dig/dig.c ++++ b/bin/dig/dig.c +@@ -173,7 +173,7 @@ help(void) { + " +[no]authority (Control display of authority section)\n" + " +[no]badcookie (Retry BADCOOKIE responses)\n" + " +[no]besteffort (Try to parse even illegal messages)\n" +-" +bufsize=### (Set EDNS0 Max UDP packet size)\n" ++" +bufsize[=###] (Set EDNS0 Max UDP packet size)\n" + " +[no]cdflag (Set checking disabled flag in query)\n" + " +[no]class (Control display of class in records)\n" + " +[no]cmd (Control display of command line -\n" +@@ -895,15 +895,21 @@ plus_option(const char *option, bool is_batchfile, + break; + case 'u':/* bufsize */ + FULLCHECK("bufsize"); +- if (value == NULL) +- goto need_value; +- if (!state) ++ if (!state) { + goto invalid_option; ++ } ++ if (value == NULL) { ++ lookup->udpsize = DEFAULT_EDNS_BUFSIZE; ++ break; ++ } + result = parse_uint(&num, value, COMMSIZE, + "buffer size"); + if (result != ISC_R_SUCCESS) + fatal("Couldn't parse buffer size"); + lookup->udpsize = num; ++ if (lookup->udpsize == 0) { ++ lookup->edns = -1; ++ } + break; + default: + goto invalid_option; +@@ -941,8 +947,9 @@ plus_option(const char *option, bool is_batchfile, + break; + case 'o': /* cookie */ + FULLCHECK("cookie"); +- if (state && lookup->edns == -1) +- lookup->edns = 0; ++ if (state && lookup->edns == -1) { ++ lookup->edns = DEFAULT_EDNS_VERSION; ++ } + lookup->sendcookie = state; + if (value != NULL) { + n = strlcpy(hexcookie, value, +@@ -975,8 +982,9 @@ plus_option(const char *option, bool is_batchfile, + break; + case 'n': /* dnssec */ + FULLCHECK("dnssec"); +- if (state && lookup->edns == -1) +- lookup->edns = 0; ++ if (state && lookup->edns == -1) { ++ lookup->edns = DEFAULT_EDNS_VERSION; ++ } + lookup->dnssec = state; + break; + case 'o': /* domain */ +@@ -1019,7 +1027,8 @@ plus_option(const char *option, bool is_batchfile, + break; + } + if (value == NULL) { +- lookup->edns = 0; ++ lookup->edns = ++ DEFAULT_EDNS_VERSION; + break; + } + result = parse_uint(&num, +@@ -1180,8 +1189,9 @@ plus_option(const char *option, bool is_batchfile, + switch (cmd[2]) { + case 'i': /* nsid */ + FULLCHECK("nsid"); +- if (state && lookup->edns == -1) +- lookup->edns = 0; ++ if (state && lookup->edns == -1) { ++ lookup->edns = DEFAULT_EDNS_VERSION; ++ } + lookup->nsid = state; + break; + case 's': /* nssearch */ +@@ -1385,8 +1395,9 @@ plus_option(const char *option, bool is_batchfile, + } + break; + } +- if (lookup->edns == -1) +- lookup->edns = 0; ++ if (lookup->edns == -1) { ++ lookup->edns = DEFAULT_EDNS_VERSION; ++ } + if (lookup->ecs_addr != NULL) { + isc_mem_free(mctx, lookup->ecs_addr); + lookup->ecs_addr = NULL; +@@ -1926,7 +1937,7 @@ parse_args(bool is_batchfile, bool config_only, + debug("making new lookup"); + default_lookup = make_empty_lookup(); + default_lookup->adflag = true; +- default_lookup->edns = 0; ++ default_lookup->edns = DEFAULT_EDNS_VERSION; + default_lookup->sendcookie = true; + + 
#ifndef NOPOSIX +diff --git a/bin/dig/dig.docbook b/bin/dig/dig.docbook +index 57ff556d49..456d1a88fe 100644 +--- a/bin/dig/dig.docbook ++++ b/bin/dig/dig.docbook +@@ -570,12 +570,14 @@ + + + +- Set the UDP message buffer size advertised using EDNS0 +- to B bytes. The maximum and +- minimum sizes of this buffer are 65535 and 0 respectively. +- Values outside this range are rounded up or down +- appropriately. Values other than zero will cause a +- EDNS query to be sent. ++ This option sets the UDP message buffer size advertised ++ using EDNS0 to B bytes. The ++ maximum and minimum sizes of this buffer are 65535 ++ and 0, respectively. +bufsize=0 ++ disables EDNS (use +bufsize=0 +edns ++ to send a EDNS messages with a advertised size of 0 ++ bytes). +bufsize restores the ++ default buffer size. + + + +diff --git a/bin/dig/dighost.c b/bin/dig/dighost.c +index 8551459078..e82f176d98 100644 +--- a/bin/dig/dighost.c ++++ b/bin/dig/dighost.c +@@ -846,7 +846,7 @@ make_empty_lookup(void) { + looknew->rdclass_sigchaseset = false; + #endif + #endif +- looknew->udpsize = 0; ++ looknew->udpsize = -1; + looknew->edns = -1; + looknew->recurse = true; + looknew->aaonly = false; +@@ -2641,10 +2641,12 @@ setup_lookup(dig_lookup_t *lookup) { + unsigned int flags; + unsigned int i = 0; + +- if (lookup->udpsize == 0) +- lookup->udpsize = 4096; +- if (lookup->edns < 0) +- lookup->edns = 0; ++ if (lookup->udpsize < 0) { ++ lookup->udpsize = DEFAULT_EDNS_BUFSIZE; ++ } ++ if (lookup->edns < 0) { ++ lookup->edns = DEFAULT_EDNS_VERSION; ++ } + + if (lookup->nsid) { + INSIST(i < MAXOPTS); +diff --git a/bin/dig/include/dig/dig.h b/bin/dig/include/dig/dig.h +index cc37c55db5..1ced573a50 100644 +--- a/bin/dig/include/dig/dig.h ++++ b/bin/dig/include/dig/dig.h +@@ -63,6 +63,10 @@ + #define SERVER_TIMEOUT 1 + + #define LOOKUP_LIMIT 64 ++ ++#define DEFAULT_EDNS_VERSION 0 ++#define DEFAULT_EDNS_BUFSIZE 4096 ++ + /*% + * Lookup_limit is just a limiter, keeping too many lookups from being + * created. 
It's job is mainly to prevent the program from running away +@@ -180,7 +184,7 @@ bool sigchase; + dig_query_t *xfr_q; + uint32_t retries; + int nsfound; +- uint16_t udpsize; ++ int16_t udpsize; + int16_t edns; + uint32_t ixfr_serial; + isc_buffer_t rdatabuf; +diff --git a/bin/dig/nslookup.c b/bin/dig/nslookup.c +index d8c3b38080..8a3a84244b 100644 +--- a/bin/dig/nslookup.c ++++ b/bin/dig/nslookup.c +@@ -808,7 +808,6 @@ addlookup(char *opt) { + lookup->recurse = recurse; + lookup->aaonly = aaonly; + lookup->retries = tries; +- lookup->udpsize = 0; + lookup->comments = comments; + if (lookup->rdtype == dns_rdatatype_any && !tcpmode_set) + lookup->tcp_mode = true; +diff --git a/bin/tests/system/digdelv/tests.sh b/bin/tests/system/digdelv/tests.sh +index 3d1010e1b7..31107f89c8 100644 +--- a/bin/tests/system/digdelv/tests.sh ++++ b/bin/tests/system/digdelv/tests.sh +@@ -649,8 +649,34 @@ ret=0 + pat='^;-m\..*IN.*A$' + tr -d '\r' < dig.out.test$n | grep "$pat" > /dev/null || ret=1 + grep "Dump of all outstanding memory allocations" dig.out.test$n > /dev/null && ret=1 +- if [ $ret != 0 ]; then echo_i "failed"; fi ++ if [ $ret -ne 0 ]; then echo_i "failed"; fi ++ status=`expr $status + $ret` ++ ++ n=$((n+1)) ++ echo_i "check that dig +bufsize=0 disables EDNS ($n)" ++ ret=0 ++ $DIG $DIGOPTS @10.53.0.3 a.example +bufsize=0 +qr > dig.out.test$n 2>&1 || ret=1 ++ grep "EDNS:" dig.out.test$n > /dev/null && ret=1 ++ if [ $ret -ne 0 ]; then echo_i "failed"; fi ++ status=`expr $status + $ret` ++ ++ n=$((n+1)) ++ echo_i "check that dig +bufsize=0 +edns sends EDNS with bufsize of 0 ($n)" ++ ret=0 ++ $DIG $DIGOPTS @10.53.0.3 a.example +bufsize=0 +edns +qr > dig.out.test$n 2>&1 || ret=1 ++ grep -E 'EDNS:.* udp: 0\r{0,1}$' dig.out.test$n > /dev/null|| ret=1 ++ if [ $ret -ne 0 ]; then echo_i "failed"; fi + status=`expr $status + $ret` ++ ++ n=$((n+1)) ++ echo_i "check that dig +bufsize restores default bufsize ($n)" ++ ret=0 ++ $DIG $DIGOPTS @10.53.0.3 a.example +bufsize=0 +bufsize +qr > dig.out.test$n 2>&1 || ret=1 ++ lines=`grep "EDNS:.* udp: 4096" dig.out.test$n | wc -l` ++ test $lines -eq 2 || ret=1 ++ if [ $ret -ne 0 ]; then echo_i "failed"; fi ++ status=`expr $status + $ret` ++ + else + echo_i "$DIG is needed, so skipping these dig tests" + fi +-- +2.23.0 + diff --git a/backport-0033-Remove-optimisation-on-obtaining-a-headlock-as-it-tr.patch b/backport-0033-Remove-optimisation-on-obtaining-a-headlock-as-it-tr.patch new file mode 100644 index 0000000..c7a989c --- /dev/null +++ b/backport-0033-Remove-optimisation-on-obtaining-a-headlock-as-it-tr.patch @@ -0,0 +1,51 @@ +From fb8a3c9ab23c3820c706714603f066132959ab90 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Thu, 27 Aug 2020 13:21:13 +1000 +Subject: [PATCH] Remove optimisation on obtaining a headlock as it triggers a + tsan. 
+ +WARNING: ThreadSanitizer: data race (pid=15898) + Write of size 8 at 0x7b6400011818 by thread T9 (mutexes: write M1597): + #0 get_client /builds/isc-projects/bind9/bin/named/client.c:3876:3 (named+0x4db171) + #1 ns_client_replace /builds/isc-projects/bind9/bin/named/client.c:3710:12 (named+0x4d737b) + #2 query_recurse /builds/isc-projects/bind9/bin/named/query.c:4325:13 (named+0x4ff469) + #3 query_find /builds/isc-projects/bind9/bin/named/query.c (named+0x4fb949) + #4 ns_query_start /builds/isc-projects/bind9/bin/named/query.c:9675:8 (named+0x4f37cb) + #5 client_request /builds/isc-projects/bind9/bin/named/client.c:3112:3 (named+0x4de9ef) + #6 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x50845) + #7 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d799) + + Previous read of size 8 at 0x7b6400011818 by thread T2: + #0 exit_check /builds/isc-projects/bind9/bin/named/client.c:698:5 (named+0x4d5d22) + #1 ns_client_detach /builds/isc-projects/bind9/bin/named/client.c:3687:8 (named+0x4d7762) + #2 query_find /builds/isc-projects/bind9/bin/named/query.c (named+0x4f9021) + #3 query_resume /builds/isc-projects/bind9/bin/named/query.c:4164:12 (named+0x509b68) + #4 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x50845) + #5 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d799) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/fb8a3c9ab23c3820c706714603f066132959ab90 +--- + lib/isc/include/isc/queue.h | 6 +----- + 1 file changed, 1 insertion(+), 5 deletions(-) + +diff --git a/lib/isc/include/isc/queue.h b/lib/isc/include/isc/queue.h +index 210f302c84..d682ba4940 100644 +--- a/lib/isc/include/isc/queue.h ++++ b/lib/isc/include/isc/queue.h +@@ -93,12 +93,8 @@ + do { \ + bool headlocked = false; \ + ISC_QLINK_INSIST(!ISC_QLINK_LINKED(elt, link)); \ +- if ((queue).head == NULL) { \ +- LOCK(&(queue).headlock); \ +- headlocked = true; \ +- } \ + LOCK(&(queue).taillock); \ +- if ((queue).tail == NULL && !headlocked) { \ ++ if ((queue).tail == NULL) { \ + UNLOCK(&(queue).taillock); \ + LOCK(&(queue).headlock); \ + LOCK(&(queue).taillock); \ +-- +2.23.0 + diff --git a/backport-0034-Address-tsan-error-in-view-destroy.patch b/backport-0034-Address-tsan-error-in-view-destroy.patch new file mode 100644 index 0000000..e7a6568 --- /dev/null +++ b/backport-0034-Address-tsan-error-in-view-destroy.patch @@ -0,0 +1,30 @@ +From deb3bf845bd672691f1c733160b9d658e243edb4 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Sat, 22 Aug 2020 11:12:55 +1000 +Subject: [PATCH] Address tsan error in view:destroy() + +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/deb3bf845bd672691f1c733160b9d658e243edb4 +--- + lib/dns/view.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/lib/dns/view.c b/lib/dns/view.c +index c1ece2e682..006f99794d 100644 +--- a/lib/dns/view.c ++++ b/lib/dns/view.c +@@ -362,9 +362,11 @@ destroy(dns_view_t *view) { + dns_dlzdb_t *dlzdb; + + REQUIRE(!ISC_LINK_LINKED(view, link)); ++ LOCK(&view->lock); + REQUIRE(RESSHUTDOWN(view)); + REQUIRE(ADBSHUTDOWN(view)); + REQUIRE(REQSHUTDOWN(view)); ++ UNLOCK(&view->lock); + + isc_refcount_destroy(&view->references); + isc_refcount_destroy(&view->weakrefs); +-- +2.23.0 + diff --git a/backport-0035-Lock-access-to-ctx-blocked-as-it-is-updated-by-multi.patch b/backport-0035-Lock-access-to-ctx-blocked-as-it-is-updated-by-multi.patch new file mode 100644 index 0000000..ad1e7ab --- /dev/null +++ 
b/backport-0035-Lock-access-to-ctx-blocked-as-it-is-updated-by-multi.patch @@ -0,0 +1,62 @@ +From bcfbc17384125d328a0f9ab70057cb6540b134c7 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Fri, 21 Aug 2020 14:05:21 +1000 +Subject: [PATCH] Lock access to ctx->blocked as it is updated by multiple + threads + +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/bcfbc17384125d328a0f9ab70057cb6540b134c7 +--- + lib/isc/unix/app.c | 13 ++++++++++++- + 1 file changed, 12 insertions(+), 1 deletion(-) + +diff --git a/lib/isc/unix/app.c b/lib/isc/unix/app.c +index 567f300195..dd7d0e8746 100644 +--- a/lib/isc/unix/app.c ++++ b/lib/isc/unix/app.c +@@ -743,8 +743,12 @@ isc__app_ctxrun(isc_appctx_t *ctx0) { + return (ISC_R_RELOAD); + } + +- if (ctx->want_shutdown && ctx->blocked) ++ LOCK(&ctx->lock); ++ if (ctx->want_shutdown && ctx->blocked) { ++ UNLOCK(&ctx->lock); + exit(1); ++ } ++ UNLOCK(&ctx->lock); + } + + return (ISC_R_SUCCESS); +@@ -930,10 +934,14 @@ isc__app_block(void) { + #ifdef ISC_PLATFORM_USETHREADS + sigset_t sset; + #endif /* ISC_PLATFORM_USETHREADS */ ++ ++ LOCK(&isc_g_appctx.lock); ++ + REQUIRE(isc_g_appctx.running); + REQUIRE(!isc_g_appctx.blocked); + + isc_g_appctx.blocked = true; ++ UNLOCK(&isc_g_appctx.lock); + #ifdef ISC_PLATFORM_USETHREADS + blockedthread = pthread_self(); + RUNTIME_CHECK(sigemptyset(&sset) == 0 && +@@ -949,10 +957,13 @@ isc__app_unblock(void) { + sigset_t sset; + #endif /* ISC_PLATFORM_USETHREADS */ + ++ LOCK(&isc_g_appctx.lock); ++ + REQUIRE(isc_g_appctx.running); + REQUIRE(isc_g_appctx.blocked); + + isc_g_appctx.blocked = false; ++ UNLOCK(&isc_g_appctx.lock); + + #ifdef ISC_PLATFORM_USETHREADS + REQUIRE(blockedthread == pthread_self()); +-- +2.23.0 + diff --git a/backport-0036-Only-test-node-data-if-we-care-about-whether-data-is.patch b/backport-0036-Only-test-node-data-if-we-care-about-whether-data-is.patch new file mode 100644 index 0000000..d10d49a --- /dev/null +++ b/backport-0036-Only-test-node-data-if-we-care-about-whether-data-is.patch @@ -0,0 +1,100 @@ +From 80bf3f38525a8fddca2bbc61d8fe228475cc7c1e Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Wed, 26 Aug 2020 16:24:13 +1000 +Subject: [PATCH] Only test node->data if we care about whether data is present + or not. 
+ +WARNING: ThreadSanitizer: data race (pid=28788) + Write of size 8 at 0x7b200002e060 by thread T1 (mutexes: write M2947): + #0 add32 /builds/isc-projects/bind9/lib/dns/rbtdb.c:6638:18 (libdns.so.1110+0xe7843) + #1 addrdataset /builds/isc-projects/bind9/lib/dns/rbtdb.c:6975:12 (libdns.so.1110+0xe4185) + #2 dns_db_addrdataset /builds/isc-projects/bind9/lib/dns/db.c:783:10 (libdns.so.1110+0x650ee) + #3 validated /builds/isc-projects/bind9/lib/dns/resolver.c:5140:11 (libdns.so.1110+0x1909f7) + #4 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507f5) + #5 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d749) + + Previous read of size 8 at 0x7b200002e060 by thread T5 (mutexes: write M521146194917735760): + #0 dns_rbt_findnode /builds/isc-projects/bind9/lib/dns/rbt.c:1708:9 (libdns.so.1110+0xd910d) + #1 cache_find /builds/isc-projects/bind9/lib/dns/rbtdb.c:5098:11 (libdns.so.1110+0xe188e) + #2 dns_db_find /builds/isc-projects/bind9/lib/dns/db.c:554:11 (libdns.so.1110+0x642bb) + #3 dns_view_find2 /builds/isc-projects/bind9/lib/dns/view.c:1068:11 (libdns.so.1110+0x1cc2c4) + #4 dbfind_name /builds/isc-projects/bind9/lib/dns/adb.c:3714:11 (libdns.so.1110+0x46a4b) + #5 dns_adb_createfind2 /builds/isc-projects/bind9/lib/dns/adb.c:3133:12 (libdns.so.1110+0x45278) + #6 findname /builds/isc-projects/bind9/lib/dns/resolver.c:3166:11 (libdns.so.1110+0x1827f0) + #7 fctx_getaddresses /builds/isc-projects/bind9/lib/dns/resolver.c:3462:3 (libdns.so.1110+0x18032d) + #8 fctx_try /builds/isc-projects/bind9/lib/dns/resolver.c:3819:12 (libdns.so.1110+0x17e174) + #9 fctx_start /builds/isc-projects/bind9/lib/dns/resolver.c:4219:4 (libdns.so.1110+0x1787a3) + #10 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507f5) + #11 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d749) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/80bf3f38525a8fddca2bbc61d8fe228475cc7c1e +--- + lib/dns/rbt.c | 19 +++++++++++-------- + 1 file changed, 11 insertions(+), 8 deletions(-) + +diff --git a/lib/dns/rbt.c b/lib/dns/rbt.c +index 04cf306220..d886d5bbbf 100644 +--- a/lib/dns/rbt.c ++++ b/lib/dns/rbt.c +@@ -218,6 +218,9 @@ getdata(dns_rbtnode_t *node, file_header_t *header) { + #define IS_ROOT(node) ((node)->is_root == 1) + #define FINDCALLBACK(node) ((node)->find_callback == 1) + ++#define WANTEMPTYDATA_OR_DATA(options, node) \ ++ ((options & DNS_RBTFIND_EMPTYDATA) != 0 || DATA(node) != NULL) ++ + /*% + * Structure elements from the rbtdb.c, not + * used as part of the rbt.c algorithms. +@@ -1705,9 +1708,9 @@ dns_rbt_findnode(dns_rbt_t *rbt, const dns_name_t *name, dns_name_t *foundname, + /* + * This might be the closest enclosing name. + */ +- if (DATA(current) != NULL || +- (options & DNS_RBTFIND_EMPTYDATA) != 0) ++ if (WANTEMPTYDATA_OR_DATA(options, current)) { + *node = current; ++ } + + /* + * Point the chain to the next level. This +@@ -1778,8 +1781,7 @@ dns_rbt_findnode(dns_rbt_t *rbt, const dns_name_t *name, dns_name_t *foundname, + * ISC_R_SUCCESS to indicate an exact match. + */ + if (current != NULL && (options & DNS_RBTFIND_NOEXACT) == 0 && +- (DATA(current) != NULL || +- (options & DNS_RBTFIND_EMPTYDATA) != 0)) { ++ (WANTEMPTYDATA_OR_DATA(options, current))) { + /* + * Found an exact match. 
+ */ +@@ -2016,11 +2018,11 @@ dns_rbt_findname(dns_rbt_t *rbt, const dns_name_t *name, unsigned int options, + result = dns_rbt_findnode(rbt, name, foundname, &node, NULL, + options, NULL, NULL); + +- if (node != NULL && +- (DATA(node) != NULL || (options & DNS_RBTFIND_EMPTYDATA) != 0)) ++ if (node != NULL && WANTEMPTYDATA_OR_DATA(options, node)) { + *data = DATA(node); +- else ++ } else { + result = ISC_R_NOTFOUND; ++ } + + return (result); + } +@@ -2857,9 +2859,10 @@ deletetreeflat(dns_rbt_t *rbt, unsigned int quantum, bool unhash, + dns_rbtnode_t *node = root; + root = PARENT(root); + +- if (DATA(node) != NULL && rbt->data_deleter != NULL) ++ if (rbt->data_deleter != NULL && DATA(node) != NULL) { + rbt->data_deleter(DATA(node), + rbt->deleter_arg); ++ } + if (unhash) + unhash_node(rbt, node); + /* +-- +2.23.0 + diff --git a/backport-0037-Test-if-linked-while-holding-the-queue-lock.patch b/backport-0037-Test-if-linked-while-holding-the-queue-lock.patch new file mode 100644 index 0000000..3ac85f1 --- /dev/null +++ b/backport-0037-Test-if-linked-while-holding-the-queue-lock.patch @@ -0,0 +1,67 @@ +From 72cbe648c47c9b8de34908531b08d191539dc902 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Thu, 27 Aug 2020 20:40:08 +1000 +Subject: [PATCH] Test if linked while holding the queue lock + +WARNING: ThreadSanitizer: data race + Read of size 8 at 0x000000000001 by thread T1: + #0 client_shutdown bin/named/client.c:849:6 + #1 dispatch lib/isc/task.c:1157:7 + #2 run lib/isc/task.c:1331:2 + + Previous write of size 8 at 0x000000000001 by thread T2 (mutexes: write M1, write M2): + #0 client_shutdown bin/named/client.c:850:3 + #1 dispatch lib/isc/task.c:1157:7 + #2 run lib/isc/task.c:1331:2 +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/72cbe648c47c9b8de34908531b08d191539dc902 +--- + bin/named/client.c | 3 +-- + lib/isc/include/isc/queue.h | 19 +++++++++++++++++++ + 2 files changed, 20 insertions(+), 2 deletions(-) + +diff --git a/bin/named/client.c b/bin/named/client.c +index 8b3227ad62..978e4d91b8 100644 +--- a/bin/named/client.c ++++ b/bin/named/client.c +@@ -850,8 +850,7 @@ client_shutdown(isc_task_t *task, isc_event_t *event) { + client->shutdown_arg = NULL; + } + +- if (ISC_QLINK_LINKED(client, ilink)) +- ISC_QUEUE_UNLINK(client->manager->inactive, client, ilink); ++ ISC_QUEUE_UNLINKIFLINKED(client->manager->inactive, client, ilink); + + client->newstate = NS_CLIENTSTATE_FREED; + client->needshutdown = false; +diff --git a/lib/isc/include/isc/queue.h b/lib/isc/include/isc/queue.h +index d682ba4940..416cefcb5d 100644 +--- a/lib/isc/include/isc/queue.h ++++ b/lib/isc/include/isc/queue.h +@@ -154,4 +154,23 @@ + (elt)->link.next = (elt)->link.prev = (void *)(-1); \ + } while(0) + ++#define ISC_QUEUE_UNLINKIFLINKED(queue, elt, link) \ ++ do { \ ++ LOCK(&(queue).headlock); \ ++ LOCK(&(queue).taillock); \ ++ if (ISC_QLINK_LINKED(elt, link)) { \ ++ if ((elt)->link.prev == NULL) \ ++ (queue).head = (elt)->link.next; \ ++ else \ ++ (elt)->link.prev->link.next = (elt)->link.next; \ ++ if ((elt)->link.next == NULL) \ ++ (queue).tail = (elt)->link.prev; \ ++ else \ ++ (elt)->link.next->link.prev = (elt)->link.prev; \ ++ } \ ++ UNLOCK(&(queue).taillock); \ ++ UNLOCK(&(queue).headlock); \ ++ (elt)->link.next = (elt)->link.prev = (void *)(-1); \ ++ } while(0) ++ + #endif /* ISC_QUEUE_H */ +-- +2.23.0 + diff --git a/backport-0038-Address-data-race-in-dns_adbentry_overquota.patch b/backport-0038-Address-data-race-in-dns_adbentry_overquota.patch new file mode 100644 index 
0000000..2b11b59 --- /dev/null +++ b/backport-0038-Address-data-race-in-dns_adbentry_overquota.patch @@ -0,0 +1,98 @@ +From 674a21946613ac624e22482ac8c66000fcc00792 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Wed, 26 Aug 2020 15:50:53 +1000 +Subject: [PATCH] Address data race in dns_adbentry_overquota + + Read of size 4 at 0x7b440003da70 by thread T8: + #0 dns_adbentry_overquota /builds/isc-projects/bind9/lib/dns/adb.c:4797:39 (libdns.so.1110+0x4da82) + #1 fctx_try /builds/isc-projects/bind9/lib/dns/resolver.c:3838:10 (libdns.so.1110+0x17e229) + #2 fctx_start /builds/isc-projects/bind9/lib/dns/resolver.c:4219:4 (libdns.so.1110+0x1787a3) + #3 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507f5) + #4 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d749) + + Previous write of size 4 at 0x7b440003da70 by thread T12 (mutexes: write M549293692588722840): + #0 dns_adb_beginudpfetch /builds/isc-projects/bind9/lib/dns/adb.c:4811:21 (libdns.so.1110+0x4db82) + #1 fctx_query /builds/isc-projects/bind9/lib/dns/resolver.c:1901:3 (libdns.so.1110+0x1814ee) + #2 fctx_try /builds/isc-projects/bind9/lib/dns/resolver.c:3863:11 (libdns.so.1110+0x17e2fa) + #3 fctx_start /builds/isc-projects/bind9/lib/dns/resolver.c:4219:4 (libdns.so.1110+0x1787a3) + #4 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507f5) + #5 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d749) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/674a21946613ac624e22482ac8c66000fcc00792 +--- + lib/dns/adb.c | 10 +++++++++- + lib/dns/include/dns/adb.h | 2 +- + lib/dns/resolver.c | 6 +++--- + 3 files changed, 13 insertions(+), 5 deletions(-) + +diff --git a/lib/dns/adb.c b/lib/dns/adb.c +index 5021a17fba..d6f56fa6d3 100644 +--- a/lib/dns/adb.c ++++ b/lib/dns/adb.c +@@ -4791,10 +4791,18 @@ dns_adb_setquota(dns_adb_t *adb, uint32_t quota, uint32_t freq, + } + + bool +-dns_adbentry_overquota(dns_adbentry_t *entry) { ++dns_adbentry_overquota(dns_adb_t *adb, dns_adbentry_t *entry) { ++ int bucket; + bool block; ++ + REQUIRE(DNS_ADBENTRY_VALID(entry)); ++ ++ bucket = entry->lock_bucket; ++ ++ LOCK(&adb->entrylocks[bucket]); + block = (entry->quota != 0 && entry->active >= entry->quota); ++ UNLOCK(&adb->entrylocks[bucket]); ++ + return (block); + } + +diff --git a/lib/dns/include/dns/adb.h b/lib/dns/include/dns/adb.h +index edf6e54935..9e328ef5ad 100644 +--- a/lib/dns/include/dns/adb.h ++++ b/lib/dns/include/dns/adb.h +@@ -815,7 +815,7 @@ dns_adb_setquota(dns_adb_t *adb, uint32_t quota, uint32_t freq, + */ + + bool +-dns_adbentry_overquota(dns_adbentry_t *entry); ++dns_adbentry_overquota(dns_adb_t *adb, dns_adbentry_t *entry); + /*%< + * Returns true if the specified ADB has too many active fetches. 
+ * +diff --git a/lib/dns/resolver.c b/lib/dns/resolver.c +index cf6c00100c..5984e00ab8 100644 +--- a/lib/dns/resolver.c ++++ b/lib/dns/resolver.c +@@ -1894,7 +1894,7 @@ fctx_query(fetchctx_t *fctx, dns_adbaddrinfo_t *addrinfo, + query->connects++; + QTRACE("connecting via TCP"); + } else { +- if (dns_adbentry_overquota(addrinfo->entry)) ++ if (dns_adbentry_overquota(fctx->adb, addrinfo->entry)) + goto cleanup_dispatch; + + /* Inform the ADB that we're starting a UDP fetch */ +@@ -3809,7 +3809,7 @@ fctx_try(fetchctx_t *fctx, bool retrying, bool badcache) { + addrinfo = fctx_nextaddress(fctx); + + /* Try to find an address that isn't over quota */ +- while (addrinfo != NULL && dns_adbentry_overquota(addrinfo->entry)) ++ while (addrinfo != NULL && dns_adbentry_overquota(fctx->adb, addrinfo->entry)) + addrinfo = fctx_nextaddress(fctx); + + if (addrinfo == NULL) { +@@ -3835,7 +3835,7 @@ fctx_try(fetchctx_t *fctx, bool retrying, bool badcache) { + addrinfo = fctx_nextaddress(fctx); + + while (addrinfo != NULL && +- dns_adbentry_overquota(addrinfo->entry)) ++ dns_adbentry_overquota(fctx->adb, addrinfo->entry)) + addrinfo = fctx_nextaddress(fctx); + + /* +-- +2.23.0 + diff --git a/backport-0039-Address-lock-order-inversion.patch b/backport-0039-Address-lock-order-inversion.patch new file mode 100644 index 0000000..35c0c5b --- /dev/null +++ b/backport-0039-Address-lock-order-inversion.patch @@ -0,0 +1,75 @@ +From 505e3381564294e6b8c7a0f69883e7d4aab0efe8 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Mon, 24 Aug 2020 13:35:41 +1000 +Subject: [PATCH] Address lock-order-inversion + +WARNING: ThreadSanitizer: lock-order-inversion (potential deadlock) (pid=12714) + Cycle in lock order graph: M100252 (0x7b7c00010a08) => M1171 (0x7b7400000dc8) => M100252 + + Mutex M1171 acquired here while holding mutex M100252 in thread T1: + #0 pthread_mutex_lock (delv+0x4483a6) + #1 dns_resolver_createfetch3 /builds/isc-projects/bind9/lib/dns/resolver.c:9585:2 (libdns.so.1110+0x1769fd) + #2 dns_resolver_createfetch /builds/isc-projects/bind9/lib/dns/resolver.c:9504:10 (libdns.so.1110+0x174e17) + #3 create_fetch /builds/isc-projects/bind9/lib/dns/validator.c:1156:10 (libdns.so.1110+0x1c1e5f) + #4 validatezonekey /builds/isc-projects/bind9/lib/dns/validator.c:2124:13 (libdns.so.1110+0x1c3b6d) + #5 start_positive_validation /builds/isc-projects/bind9/lib/dns/validator.c:2301:10 (libdns.so.1110+0x1bfde9) + #6 validator_start /builds/isc-projects/bind9/lib/dns/validator.c:3647:12 (libdns.so.1110+0x1bef62) + #7 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) + #8 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) + + Mutex M100252 previously acquired by the same thread here: + #0 pthread_mutex_lock (delv+0x4483a6) + #1 validator_start /builds/isc-projects/bind9/lib/dns/validator.c:3628:2 (libdns.so.1110+0x1bee31) + #2 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) + #3 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) + + Mutex M100252 acquired here while holding mutex M1171 in thread T1: + #0 pthread_mutex_lock (delv+0x4483a6) + #1 dns_validator_destroy /builds/isc-projects/bind9/lib/dns/validator.c:3912:2 (libdns.so.1110+0x1bf788) + #2 validated /builds/isc-projects/bind9/lib/dns/resolver.c:4916:2 (libdns.so.1110+0x18fdfd) + #3 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) + #4 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) + 
+ Mutex M1171 previously acquired by the same thread here: + #0 pthread_mutex_lock (delv+0x4483a6) + #1 validated /builds/isc-projects/bind9/lib/dns/resolver.c:4907:2 (libdns.so.1110+0x18fc3d) + #2 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) + #3 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) + + Thread T1 'isc-worker0000' (tid=12729, running) created by main thread at: + #0 pthread_create (delv+0x42afdb) + #1 isc_thread_create /builds/isc-projects/bind9/lib/isc/pthreads/thread.c:60:8 (libisc.so.1107+0x726d8) + #2 isc__taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:1468:7 (libisc.so.1107+0x4d635) + #3 isc_taskmgr_createinctx /builds/isc-projects/bind9/lib/isc/task.c:2091:11 (libisc.so.1107+0x4f4ac) + #4 main /builds/isc-projects/bind9/bin/delv/delv.c:1639:2 (delv+0x4b7f96) + +SUMMARY: ThreadSanitizer: lock-order-inversion (potential deadlock) (/builds/isc-projects/bind9/bin/delv/.libs/delv+0x4483a6) in pthread_mutex_lock +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/505e3381564294e6b8c7a0f69883e7d4aab0efe8 +--- + lib/dns/resolver.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/lib/dns/resolver.c b/lib/dns/resolver.c +index 1146f2c62f..cf6c00100c 100644 +--- a/lib/dns/resolver.c ++++ b/lib/dns/resolver.c +@@ -4908,6 +4908,7 @@ validated(isc_task_t *task, isc_event_t *event) { + + ISC_LIST_UNLINK(fctx->validators, vevent->validator, link); + fctx->validator = NULL; ++ UNLOCK(&res->buckets[bucketnum].lock); + + /* + * Destroy the validator early so that we can +@@ -4918,6 +4919,7 @@ validated(isc_task_t *task, isc_event_t *event) { + + negative = (vevent->rdataset == NULL); + ++ LOCK(&res->buckets[bucketnum].lock); + sentresponse = ((fctx->options & DNS_FETCHOPT_NOVALIDATE) != 0); + + /* +-- +2.23.0 + diff --git a/backport-0040-Prevent-loads_pending-going-to-zero-while-kicking-th.patch b/backport-0040-Prevent-loads_pending-going-to-zero-while-kicking-th.patch new file mode 100644 index 0000000..b3ae5d3 --- /dev/null +++ b/backport-0040-Prevent-loads_pending-going-to-zero-while-kicking-th.patch @@ -0,0 +1,33 @@ +From 1f7ccaba1bbb245cf06918fd397a0a1743f263f1 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Fri, 21 Aug 2020 18:35:38 +1000 +Subject: [PATCH] Prevent loads_pending going to zero while kicking the loads. + +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/1f7ccaba1bbb245cf06918fd397a0a1743f263f1 +--- + lib/dns/zt.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +diff --git a/lib/dns/zt.c b/lib/dns/zt.c +index 04d7823d8b..8098b90d1f 100644 +--- a/lib/dns/zt.c ++++ b/lib/dns/zt.c +@@ -296,9 +296,13 @@ dns_zt_asyncload2(dns_zt_t *zt, dns_zt_allloaded_t alldone, void *arg, + RWLOCK(&zt->rwlock, isc_rwlocktype_write); + + INSIST(zt->loads_pending == 0); ++ /* ++ * Prevent loads_pending going to zero while kicking off the loads. 
++ */ ++ zt->loads_pending++; + result = dns_zt_apply2(zt, false, NULL, asyncload, ¶ms); + +- pending = zt->loads_pending; ++ pending = --zt->loads_pending; + if (pending != 0) { + zt->loaddone = alldone; + zt->loaddone_arg = arg; +-- +2.23.0 + diff --git a/backport-0041-Address-data-races-between-socket-bitfields.patch b/backport-0041-Address-data-races-between-socket-bitfields.patch new file mode 100644 index 0000000..00a580c --- /dev/null +++ b/backport-0041-Address-data-races-between-socket-bitfields.patch @@ -0,0 +1,95 @@ +From 16e7e2732879ab53d3620705807f5cbd0aace43e Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Fri, 21 Aug 2020 15:50:57 +1000 +Subject: [PATCH] Address data races between socket bitfields + +* address data race between sock->pending_recv and sock->connected +* address data race between sock->bound and sock->pending_recv + +================== +WARNING: ThreadSanitizer: data race (pid=1985) + Read of size 2 at 0x7b54000c07c0 by thread T6: + #0 isc__socket_sendto /builds/isc-projects/bind9/lib/isc/unix/socket.c:5291:2 (libisc.so.1107+0x65a00) + #1 isc__socket_send /builds/isc-projects/bind9/lib/isc/unix/socket.c:5270:10 (libisc.so.1107+0x65944) + #2 isc_socket_send /builds/isc-projects/bind9/lib/isc/unix/./../socket_api.c:329:10 (libisc.so.1107+0x6b3c9) + #3 sendstream /builds/isc-projects/bind9/bin/named/xfrout.c:1548:3 (named+0x555038) + #4 ns_xfr_start /builds/isc-projects/bind9/bin/named/xfrout.c:1132:2 (named+0x553147) + #5 ns_query_start /builds/isc-projects/bind9/bin/named/query.c:9572:4 (named+0x4f3329) + #6 client_request /builds/isc-projects/bind9/bin/named/client.c:3115:3 (named+0x4de6af) + #7 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x50845) + #8 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d799) + + Previous write of size 2 at 0x7b54000c07c0 by thread T14 (mutexes: write M57, write M855819529908651432): + #0 dispatch_recv /builds/isc-projects/bind9/lib/isc/unix/socket.c:3353:21 (libisc.so.1107+0x6c601) + #1 process_fd /builds/isc-projects/bind9/lib/isc/unix/socket.c:4048:5 (libisc.so.1107+0x6c1be) + #2 process_fds /builds/isc-projects/bind9/lib/isc/unix/socket.c:4161:3 (libisc.so.1107+0x6bfc0) + #3 watcher /builds/isc-projects/bind9/lib/isc/unix/socket.c:4407:10 (libisc.so.1107+0x64398) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/16e7e2732879ab53d3620705807f5cbd0aace43e +--- + lib/isc/unix/socket.c | 19 ++++++------------- + 1 file changed, 6 insertions(+), 13 deletions(-) + +diff --git a/lib/isc/unix/socket.c b/lib/isc/unix/socket.c +index 2b034d1762..0ebdf4c345 100644 +--- a/lib/isc/unix/socket.c ++++ b/lib/isc/unix/socket.c +@@ -5175,7 +5175,6 @@ socket_send(isc__socket_t *sock, isc_socketevent_t *dev, isc_task_t *task, + unsigned int flags) + { + int io_state; +- bool have_lock = false; + isc_task_t *ntask = NULL; + isc_result_t result = ISC_R_SUCCESS; + +@@ -5201,12 +5200,10 @@ socket_send(isc__socket_t *sock, isc_socketevent_t *dev, isc_task_t *task, + } + } + +- if (sock->type == isc_sockettype_udp) ++ LOCK(&sock->lock); ++ if (sock->type == isc_sockettype_udp) { + io_state = doio_send(sock, dev); +- else { +- LOCK(&sock->lock); +- have_lock = true; +- ++ } else { + if (ISC_LIST_EMPTY(sock->send_list)) + io_state = doio_send(sock, dev); + else +@@ -5223,11 +5220,6 @@ socket_send(isc__socket_t *sock, isc_socketevent_t *dev, isc_task_t *task, + isc_task_attach(task, &ntask); + dev->attributes |= ISC_SOCKEVENTATTR_ATTACHED; + +- if (!have_lock) { +- 
LOCK(&sock->lock); +- have_lock = true; +- } +- + /* + * Enqueue the request. If the socket was previously + * not being watched, poke the watcher to start +@@ -5257,8 +5249,7 @@ socket_send(isc__socket_t *sock, isc_socketevent_t *dev, isc_task_t *task, + break; + } + +- if (have_lock) +- UNLOCK(&sock->lock); ++ UNLOCK(&sock->lock); + + return (result); + } +@@ -5291,7 +5282,9 @@ isc__socket_sendto(isc_socket_t *sock0, isc_region_t *region, + manager = sock->manager; + REQUIRE(VALID_MANAGER(manager)); + ++ LOCK(&sock->lock); + INSIST(sock->bound); ++ UNLOCK(&sock->lock); + + dev = allocate_socketevent(manager->mctx, sock, + ISC_SOCKEVENT_SENDDONE, action, arg); +-- +2.23.0 + diff --git a/backport-0042-Only-read-dns_master_indent-and-dns_master_indentstr.patch b/backport-0042-Only-read-dns_master_indent-and-dns_master_indentstr.patch new file mode 100644 index 0000000..1039c37 --- /dev/null +++ b/backport-0042-Only-read-dns_master_indent-and-dns_master_indentstr.patch @@ -0,0 +1,172 @@ +From 054dc48a1f21dc249df613c89658accaf9d21986 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Sat, 22 Aug 2020 01:52:09 +1000 +Subject: [PATCH] Only read dns_master_indent and dns_master_indentstr in named + +The old code was not thread safe w.r.t. to the use of these variable. +We now only set them at the start of execution and copy them to +the message structure so they can be safely updated. This is the +minimal change to make them thread safe. +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/054dc48a1f21dc249df613c89658accaf9d21986 +--- + lib/dns/include/dns/message.h | 2 ++ + lib/dns/include/dns/types.h | 5 +++++ + lib/dns/message.c | 30 ++++++++++++++++-------------- + 3 files changed, 23 insertions(+), 14 deletions(-) + +diff --git a/lib/dns/include/dns/message.h b/lib/dns/include/dns/message.h +index 0072d5a8f2..d567fae63b 100644 +--- a/lib/dns/include/dns/message.h ++++ b/lib/dns/include/dns/message.h +@@ -263,6 +263,8 @@ struct dns_message { + + dns_rdatasetorderfunc_t order; + const void * order_arg; ++ ++ dns_indent_t indent; + }; + + struct dns_ednsopt { +diff --git a/lib/dns/include/dns/types.h b/lib/dns/include/dns/types.h +index 567e8a879e..a497c04bab 100644 +--- a/lib/dns/include/dns/types.h ++++ b/lib/dns/include/dns/types.h +@@ -385,6 +385,11 @@ typedef enum { + dns_stale_answer_conf + } dns_stale_answer_t; + ++typedef struct { ++ const char *string; ++ size_t count; ++} dns_indent_t; ++ + /* + * Functions. 
+ */ +diff --git a/lib/dns/message.c b/lib/dns/message.c +index 9dafd69f11..6a6151952c 100644 +--- a/lib/dns/message.c ++++ b/lib/dns/message.c +@@ -454,6 +454,8 @@ msginit(dns_message_t *m) { + m->tkey = 0; + m->rdclass_set = 0; + m->querytsig = NULL; ++ m->indent.string = dns_master_indentstr; ++ m->indent.count = dns_master_indent; + } + + static inline void +@@ -3298,8 +3300,8 @@ dns_message_checksig(dns_message_t *msg, dns_view_t *view) { + if ((__flags & DNS_STYLEFLAG_INDENT) == 0ULL && \ + (__flags & DNS_STYLEFLAG_YAML) == 0ULL) \ + break; \ +- for (__i = 0; __i < dns_master_indent; __i++) { \ +- ADD_STRING(target, dns_master_indentstr); \ ++ for (__i = 0; __i < msg->indent.count; __i++) { \ ++ ADD_STRING(target, msg->indent.string); \ + } \ + } while (0) + +@@ -3319,7 +3321,7 @@ dns_message_sectiontotext(dns_message_t *msg, dns_section_t section, + REQUIRE(target != NULL); + REQUIRE(VALID_SECTION(section)); + +- saveindent = dns_master_indent; ++ saveindent = msg->indent.count; + sflags = dns_master_styleflags(style); + if (ISC_LIST_EMPTY(msg->sections[section])) + goto cleanup; +@@ -3349,7 +3351,7 @@ dns_message_sectiontotext(dns_message_t *msg, dns_section_t section, + goto cleanup; + } + if ((sflags & DNS_STYLEFLAG_YAML) != 0) { +- dns_master_indent++; ++ msg->indent.count++; + } + do { + name = NULL; +@@ -3389,7 +3391,7 @@ dns_message_sectiontotext(dns_message_t *msg, dns_section_t section, + result = dns_message_nextname(msg, section); + } while (result == ISC_R_SUCCESS); + if ((sflags & DNS_STYLEFLAG_YAML) != 0) { +- dns_master_indent--; ++ msg->indent.count--; + } + if ((flags & DNS_MESSAGETEXTFLAG_NOHEADERS) == 0 && + (flags & DNS_MESSAGETEXTFLAG_NOCOMMENTS) == 0 && +@@ -3402,7 +3404,7 @@ dns_message_sectiontotext(dns_message_t *msg, dns_section_t section, + result = ISC_R_SUCCESS; + + cleanup: +- dns_master_indent = saveindent; ++ msg->indent.count = saveindent; + return (result); + } + +@@ -3519,7 +3521,7 @@ dns_message_pseudosectiontoyaml(dns_message_t *msg, + isc_buffer_t optbuf; + uint16_t optcode, optlen; + unsigned char *optdata; +- unsigned int saveindent = dns_master_indent; ++ unsigned int saveindent = msg->indent.count; + unsigned int optindent; + + REQUIRE(DNS_MESSAGE_VALID(msg)); +@@ -3535,11 +3537,11 @@ dns_message_pseudosectiontoyaml(dns_message_t *msg, + + INDENT(style); + ADD_STRING(target, "OPT_PSEUDOSECTION:\n"); +- dns_master_indent++; ++ msg->indent.count++; + + INDENT(style); + ADD_STRING(target, "EDNS:\n"); +- dns_master_indent++; ++ msg->indent.count++; + + INDENT(style); + ADD_STRING(target, "version: "); +@@ -3583,10 +3585,10 @@ dns_message_pseudosectiontoyaml(dns_message_t *msg, + + isc_buffer_init(&optbuf, rdata.data, rdata.length); + isc_buffer_add(&optbuf, rdata.length); +- optindent = dns_master_indent; ++ optindent = msg->indent.count; + while (isc_buffer_remaininglength(&optbuf) != 0) { + bool extra_text = false; +- dns_master_indent = optindent; ++ msg->indent.count = optindent; + INSIST(isc_buffer_remaininglength(&optbuf) >= 4U); + optcode = isc_buffer_getuint16(&optbuf); + optlen = isc_buffer_getuint16(&optbuf); +@@ -3672,7 +3674,7 @@ dns_message_pseudosectiontoyaml(dns_message_t *msg, + if (optlen >= 2U) { + uint16_t ede; + ADD_STRING(target, ":\n"); +- dns_master_indent++; ++ msg->indent.count++; + INDENT(style); + ADD_STRING(target, "INFO-CODE:"); + ede = isc_buffer_getuint16(&optbuf); +@@ -3816,7 +3818,7 @@ dns_message_pseudosectiontoyaml(dns_message_t *msg, + } + ADD_STRING(target, "\n"); + } +- dns_master_indent = optindent; ++ 
msg->indent.count = optindent; + result = ISC_R_SUCCESS; + goto cleanup; + case DNS_PSEUDOSECTION_TSIG: +@@ -3848,7 +3850,7 @@ dns_message_pseudosectiontoyaml(dns_message_t *msg, + result = ISC_R_UNEXPECTED; + + cleanup: +- dns_master_indent = saveindent; ++ msg->indent.count = saveindent; + return (result); + } + +-- +2.23.0 + diff --git a/backport-0043-Defer-read-of-zl-server-and-zl-reconfig-until.patch b/backport-0043-Defer-read-of-zl-server-and-zl-reconfig-until.patch new file mode 100644 index 0000000..477bfde --- /dev/null +++ b/backport-0043-Defer-read-of-zl-server-and-zl-reconfig-until.patch @@ -0,0 +1,41 @@ +From 00d315046bd38a660ce885c64b01fb11f2ab8574 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Fri, 21 Aug 2020 19:51:59 +1000 +Subject: [PATCH] Defer read of zl->server and zl->reconfig until + +the reference counter has gone to zero and there is +nolonger a possibility of changes in other threads. +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/00d315046bd38a660ce885c64b01fb11f2ab8574 +--- + bin/named/server.c | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +diff --git a/bin/named/server.c b/bin/named/server.c +index 79d5d4c00b..6aca0224c7 100644 +--- a/bin/named/server.c ++++ b/bin/named/server.c +@@ -8725,8 +8725,8 @@ static isc_result_t + view_loaded(void *arg) { + isc_result_t result; + ns_zoneload_t *zl = (ns_zoneload_t *) arg; +- ns_server_t *server = zl->server; +- bool reconfig = zl->reconfig; ++ ns_server_t *server; ++ bool reconfig; + unsigned int refs; + + +@@ -8742,6 +8742,9 @@ view_loaded(void *arg) { + if (refs != 0) + return (ISC_R_SUCCESS); + ++ server = zl->server; ++ reconfig = zl->reconfig; ++ + isc_refcount_destroy(&zl->refs); + isc_mem_put(server->mctx, zl, sizeof (*zl)); + +-- +2.23.0 + diff --git a/backport-0044-Use-a-reference-counter-for-zt.patch b/backport-0044-Use-a-reference-counter-for-zt.patch new file mode 100644 index 0000000..6b926f2 --- /dev/null +++ b/backport-0044-Use-a-reference-counter-for-zt.patch @@ -0,0 +1,219 @@ +From 7db778854e35f2dcea2a08207110dfed9366e8a5 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Mon, 31 Aug 2020 19:28:38 +1000 +Subject: [PATCH] Use a reference counter for zt + +WARNING: ThreadSanitizer: data race + Write of size 8 at 0x000000000001 by thread T1 (mutexes: write M1): + #0 memset + #1 mem_put lib/isc/mem.c:819 + #2 isc___mem_free lib/isc/mem.c:1662 + #3 isc__mem_free lib/isc/mem.c:3078 + #4 isc___mem_putanddetach lib/isc/mem.c:1221 + #5 isc__mem_putanddetach lib/isc/mem.c:3033 + #6 zt_destroy lib/dns/zt.c:214 + #7 doneloading lib/dns/zt.c:591 + #8 zone_asyncload lib/dns/zone.c:2243 + #9 dispatch lib/isc/task.c:1157 + #10 run lib/isc/task.c:1331 + #11 + + Previous atomic read of size 8 at 0x000000000001 by thread T2: + #0 __tsan_atomic64_load + #1 isc_rwlock_unlock lib/isc/rwlock.c:612 + #2 doneloading lib/dns/zt.c:585 + #3 zone_asyncload lib/dns/zone.c:2243 + #4 dispatch lib/isc/task.c:1157 + #5 run lib/isc/task.c:1331 + #6 + + Location is heap block of size 273 at 0x000000000015 allocated by thread T3: + #0 malloc + #1 internal_memalloc lib/isc/mem.c:887 + #2 mem_get lib/isc/mem.c:792 + #3 mem_allocateunlocked lib/isc/mem.c:1545 + #4 isc___mem_allocate lib/isc/mem.c:1566 + #5 isc__mem_allocate lib/isc/mem.c:3048 + #6 isc___mem_get lib/isc/mem.c:1304 + #7 isc__mem_get lib/isc/mem.c:3012 + #8 dns_zt_create lib/dns/zt.c:85 + #9 dns_view_create lib/dns/view.c:126 + #10 create_view server.c:5312 + #11 load_configuration server.c:8101 + #12 loadconfig 
server.c:9428 + #13 ns_server_reconfigcommand server.c:9763 + #14 ns_control_docommand bin/named/control.c:243 + #15 control_recvmessage bin/named/controlconf.c:465 + #16 dispatch lib/isc/task.c:1157 + #17 run lib/isc/task.c:1331 + #18 +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/7db778854e35f2dcea2a08207110dfed9366e8a5 +--- + lib/dns/zt.c | 63 ++++++++++++++++++++++------------------------------ + 1 file changed, 26 insertions(+), 37 deletions(-) + +diff --git a/lib/dns/zt.c b/lib/dns/zt.c +index 8098b90d1f..14d8bcd6be 100644 +--- a/lib/dns/zt.c ++++ b/lib/dns/zt.c +@@ -41,9 +41,10 @@ struct dns_zt { + isc_rwlock_t rwlock; + dns_zt_allloaded_t loaddone; + void * loaddone_arg; ++ isc_refcount_t references; ++ + /* Locked by lock. */ +- bool flush; +- uint32_t references; ++ bool flush; + unsigned int loads_pending; + dns_rbt_t *table; + }; +@@ -97,7 +98,7 @@ dns_zt_create(isc_mem_t *mctx, dns_rdataclass_t rdclass, dns_zt_t **ztp) { + + zt->mctx = NULL; + isc_mem_attach(mctx, &zt->mctx); +- zt->references = 1; ++ isc_refcount_init(&zt->references, 1); + zt->flush = false; + zt->rdclass = rdclass; + zt->magic = ZTMAGIC; +@@ -187,13 +188,7 @@ dns_zt_attach(dns_zt_t *zt, dns_zt_t **ztp) { + REQUIRE(VALID_ZT(zt)); + REQUIRE(ztp != NULL && *ztp == NULL); + +- RWLOCK(&zt->rwlock, isc_rwlocktype_write); +- +- INSIST(zt->references > 0); +- zt->references++; +- INSIST(zt->references != 0); +- +- RWUNLOCK(&zt->rwlock, isc_rwlocktype_write); ++ isc_refcount_increment(&zt->references, NULL); + + *ztp = zt; + } +@@ -206,8 +201,10 @@ flush(dns_zone_t *zone, void *uap) { + + static void + zt_destroy(dns_zt_t *zt) { +- if (zt->flush) ++ if (zt->flush) { + (void)dns_zt_apply(zt, false, flush, NULL); ++ } ++ isc_refcount_destroy(&zt->references); + dns_rbt_destroy(&zt->table); + isc_rwlock_destroy(&zt->rwlock); + zt->magic = 0; +@@ -216,28 +213,24 @@ zt_destroy(dns_zt_t *zt) { + + static void + zt_flushanddetach(dns_zt_t **ztp, bool need_flush) { +- bool destroy = false; ++ unsigned int refs; + dns_zt_t *zt; + + REQUIRE(ztp != NULL && VALID_ZT(*ztp)); + + zt = *ztp; ++ *ztp = NULL; + +- RWLOCK(&zt->rwlock, isc_rwlocktype_write); +- +- INSIST(zt->references > 0); +- zt->references--; +- if (zt->references == 0) +- destroy = true; +- if (need_flush) ++ if (need_flush) { ++ RWLOCK(&zt->rwlock, isc_rwlocktype_write); + zt->flush = true; ++ RWUNLOCK(&zt->rwlock, isc_rwlocktype_write); ++ } + +- RWUNLOCK(&zt->rwlock, isc_rwlocktype_write); +- +- if (destroy) ++ isc_refcount_decrement(&zt->references, &refs); ++ if (refs == 0) { + zt_destroy(zt); +- +- *ztp = NULL; ++ } + } + + void +@@ -301,13 +294,11 @@ dns_zt_asyncload2(dns_zt_t *zt, dns_zt_allloaded_t alldone, void *arg, + */ + zt->loads_pending++; + result = dns_zt_apply2(zt, false, NULL, asyncload, ¶ms); +- + pending = --zt->loads_pending; + if (pending != 0) { + zt->loaddone = alldone; + zt->loaddone_arg = arg; + } +- + RWUNLOCK(&zt->rwlock, isc_rwlocktype_write); + + if (pending == 0) +@@ -329,18 +320,18 @@ asyncload(dns_zone_t *zone, void *paramsv) { + dns_zt_t *zt; + + REQUIRE(zone != NULL); +- zt = dns_zone_getview(zone)->zonetable; ++ zt = params->zt; + INSIST(VALID_ZT(zt)); + +- INSIST(zt->references > 0); +- zt->references++; ++ isc_refcount_increment(&zt->references, NULL); + zt->loads_pending++; + + result = dns_zone_asyncload2(zone, *params->dl, zt, params->newonly); + if (result != ISC_R_SUCCESS) { +- zt->references--; ++ unsigned int refs; + zt->loads_pending--; +- INSIST(zt->references > 0); ++ 
isc_refcount_decrement(&zt->references, &refs); ++ INSIST(refs > 0); + } + return (ISC_R_SUCCESS); + } +@@ -560,7 +551,7 @@ dns_zt_apply2(dns_zt_t *zt, bool stop, isc_result_t *sub, + */ + static isc_result_t + doneloading(dns_zt_t *zt, dns_zone_t *zone, isc_task_t *task) { +- bool destroy = false; ++ unsigned int refs; + dns_zt_allloaded_t alldone = NULL; + void *arg = NULL; + +@@ -571,10 +562,6 @@ doneloading(dns_zt_t *zt, dns_zone_t *zone, isc_task_t *task) { + + RWLOCK(&zt->rwlock, isc_rwlocktype_write); + INSIST(zt->loads_pending != 0); +- INSIST(zt->references != 0); +- zt->references--; +- if (zt->references == 0) +- destroy = true; + zt->loads_pending--; + if (zt->loads_pending == 0) { + alldone = zt->loaddone; +@@ -587,8 +574,10 @@ doneloading(dns_zt_t *zt, dns_zone_t *zone, isc_task_t *task) { + if (alldone != NULL) + alldone(arg); + +- if (destroy) ++ isc_refcount_decrement(&zt->references, &refs); ++ if (refs == 0) { + zt_destroy(zt); ++ } + + return (ISC_R_SUCCESS); + } +-- +2.23.0 + diff --git a/backport-0045-Pause-dbiterator-to-release-rwlock-to-prevent-lock-o.patch b/backport-0045-Pause-dbiterator-to-release-rwlock-to-prevent-lock-o.patch new file mode 100644 index 0000000..27b3710 --- /dev/null +++ b/backport-0045-Pause-dbiterator-to-release-rwlock-to-prevent-lock-o.patch @@ -0,0 +1,119 @@ +From 36849cbfa70ce84cc10b93da6482125ee7927992 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Fri, 4 Sep 2020 14:18:17 +1000 +Subject: [PATCH] Pause dbiterator to release rwlock to prevent + lock-order-inversion. + + WARNING: ThreadSanitizer: lock-order-inversion (potential deadlock) + Cycle in lock order graph: M1 (0x000000000001) => M2 (0x000000000002) => M3 (0x000000000000) => M1 + + Mutex M2 acquired here while holding mutex M1 in thread T1: + #0 pthread_rwlock_rdlock + #1 isc_rwlock_lock lib/isc/rwlock.c:48:3 + #2 findnodeintree lib/dns/rbtdb.c:2877:2 + #3 findnode lib/dns/rbtdb.c:2941:10 + #4 dns_db_findnode lib/dns/db.c:439:11 + #5 copy_non_dnssec_records lib/dns/zone.c:16031:11 + #6 receive_secure_db lib/dns/zone.c:16163:12 + #7 dispatch lib/isc/task.c:1152:7 + #8 run lib/isc/task.c:1344:2 + + Mutex M1 previously acquired by the same thread here: + #0 pthread_rwlock_rdlock + #1 isc_rwlock_lock lib/isc/rwlock.c:48:3 + #2 resume_iteration lib/dns/rbtdb.c:9357:2 + #3 dbiterator_first lib/dns/rbtdb.c:9407:3 + #4 dns_dbiterator_first lib/dns/dbiterator.c:43:10 + #5 receive_secure_db lib/dns/zone.c:16160:16 + #6 dispatch lib/isc/task.c:1152:7 + #7 run lib/isc/task.c:1344:2 + + Mutex M3 acquired here while holding mutex M2 in thread T2: + #0 pthread_rwlock_rdlock + #1 isc_rwlock_lock lib/isc/rwlock.c:48:3 + #2 zone_sign lib/dns/zone.c:9244:3 + #3 zone_maintenance lib/dns/zone.c:11044:4 + #4 zone_timer lib/dns/zone.c:14087:2 + #5 dispatch lib/isc/task.c:1152:7 + #6 run lib/isc/task.c:1344:2 + + Mutex M2 previously acquired by the same thread here: + #0 pthread_rwlock_rdlock + #1 isc_rwlock_lock lib/isc/rwlock.c:48:3 + #2 resume_iteration lib/dns/rbtdb.c:9357:2 + #3 dbiterator_next lib/dns/rbtdb.c:9647:3 + #4 dns_dbiterator_next lib/dns/dbiterator.c:87:10 + #5 zone_sign lib/dns/zone.c:9485:13 + #6 zone_maintenance lib/dns/zone.c:11044:4 + #7 zone_timer lib/dns/zone.c:14087:2 + #8 dispatch lib/isc/task.c:1152:7 + #9 run lib/isc/task.c:1344:2 + + Mutex M1 acquired here while holding mutex M3 in thread T3: + #0 pthread_rwlock_rdlock + #1 isc_rwlock_lock lib/isc/rwlock.c:48:3 + #2 findnodeintree lib/dns/rbtdb.c:2877:2 + #3 findnode lib/dns/rbtdb.c:2941:10 + #4 dns_db_findnode 
lib/dns/db.c:439:11 + #5 zone_get_from_db lib/dns/zone.c:5602:11 + #6 get_raw_serial lib/dns/zone.c:2520:12 + #7 zone_gotwritehandle lib/dns/zone.c:2559:4 + #8 dispatch lib/isc/task.c:1152:7 + #9 run lib/isc/task.c:1344:2 + + Mutex M3 previously acquired by the same thread here: + #0 pthread_rwlock_rdlock + #1 isc_rwlock_lock lib/isc/rwlock.c:48:3 + #2 zone_gotwritehandle lib/dns/zone.c:2552:2 + #3 dispatch lib/isc/task.c:1152:7 + #4 run lib/isc/task.c:1344:2 + + Thread T1 (running) created by main thread at: + #0 pthread_create + #1 isc_thread_create lib/isc/pthreads/thread.c:73:8 + #2 isc_taskmgr_create lib/isc/task.c:1434:3 + #3 create_managers bin/named/main.c:915:11 + #4 setup bin/named/main.c:1223:11 + #5 main bin/named/main.c:1523:2 + + Thread T2 (running) created by main thread at: + #0 pthread_create + #1 isc_thread_create lib/isc/pthreads/thread.c:73:8 + #2 isc_taskmgr_create lib/isc/task.c:1434:3 + #3 create_managers bin/named/main.c:915:11 + #4 setup bin/named/main.c:1223:11 + #5 main bin/named/main.c:1523:2 + + Thread T3 (running) created by main thread at: + #0 pthread_create + #1 isc_thread_create lib/isc/pthreads/thread.c:73:8 + #2 isc_taskmgr_create lib/isc/task.c:1434:3 + #3 create_managers bin/named/main.c:915:11 + #4 setup bin/named/main.c:1223:11 + #5 main bin/named/main.c:1523:2 + + SUMMARY: ThreadSanitizer: lock-order-inversion (potential deadlock) in pthread_rwlock_rdlock + +(cherry picked from commit e185e37137729c6e377e65d8744c49aef843046f) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/36849cbfa70ce84cc10b93da6482125ee7927992 +--- + lib/dns/zone.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/lib/dns/zone.c b/lib/dns/zone.c +index e120dded9e..257f26780c 100644 +--- a/lib/dns/zone.c ++++ b/lib/dns/zone.c +@@ -14986,6 +14986,8 @@ copy_non_dnssec_records(dns_db_t *db, dns_db_t *version, dns_db_t *rawdb, + return (ISC_R_SUCCESS); + } + ++ dns_dbiterator_pause(dbiterator); ++ + result = dns_db_findnode(db, name, true, &node); + if (result != ISC_R_SUCCESS) { + goto cleanup; +-- +2.23.0 + diff --git a/backport-0046-Pause-dbiterator-to-release-rwlock-to-prevent-lock-o.patch b/backport-0046-Pause-dbiterator-to-release-rwlock-to-prevent-lock-o.patch new file mode 100644 index 0000000..22af620 --- /dev/null +++ b/backport-0046-Pause-dbiterator-to-release-rwlock-to-prevent-lock-o.patch @@ -0,0 +1,91 @@ +From 38734d80b5ea2bf62a3bba56a42fea3e3a4a93f4 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Fri, 4 Sep 2020 16:07:57 +1000 +Subject: [PATCH] Pause dbiterator to release rwlock to prevent + lock-order-inversion. 
+ + WARNING: ThreadSanitizer: lock-order-inversion (potential deadlock) + Cycle in lock order graph: M1 (0x000000000001) => M2 (0x000000000000) => M1 + + Mutex M2 acquired here while holding mutex M1 in thread T1: + #0 pthread_rwlock_rdlock + #1 isc_rwlock_lock lib/isc/rwlock.c:48:3 + #2 zone_sign lib/dns/zone.c:9247:3 + #3 zone_maintenance lib/dns/zone.c:11047:4 + #4 zone_timer lib/dns/zone.c:14090:2 + #5 dispatch lib/isc/task.c:1152:7 + #6 run lib/isc/task.c:1344:2 + + Mutex M1 previously acquired by the same thread here: + #0 pthread_rwlock_rdlock + #1 isc_rwlock_lock lib/isc/rwlock.c:48:3 + #2 resume_iteration lib/dns/rbtdb.c:9357:2 + #3 dbiterator_next lib/dns/rbtdb.c:9647:3 + #4 dns_dbiterator_next lib/dns/dbiterator.c:87:10 + #5 zone_sign lib/dns/zone.c:9488:13 + #6 zone_maintenance lib/dns/zone.c:11047:4 + #7 zone_timer lib/dns/zone.c:14090:2 + #8 dispatch lib/isc/task.c:1152:7 + #9 run lib/isc/task.c:1344:2 + + Mutex M1 acquired here while holding mutex M2 in thread T2: + #0 pthread_rwlock_rdlock + #1 isc_rwlock_lock lib/isc/rwlock.c:48:3 + #2 findnodeintree lib/dns/rbtdb.c:2877:2 + #3 findnode lib/dns/rbtdb.c:2941:10 + #4 dns_db_findnode lib/dns/db.c:439:11 + #5 dns_db_getsoaserial lib/dns/db.c:780:11 + #6 dump_done lib/dns/zone.c:11428:15 + #7 dump_quantum lib/dns/masterdump.c:1487:2 + #8 dispatch lib/isc/task.c:1152:7 + #9 run lib/isc/task.c:1344:2 + + Mutex M2 previously acquired by the same thread here: + #0 pthread_rwlock_rdlock + #1 isc_rwlock_lock lib/isc/rwlock.c:48:3 + #2 dump_done lib/dns/zone.c:11426:4 + #3 dump_quantum lib/dns/masterdump.c:1487:2 + #4 dispatch lib/isc/task.c:1152:7 + #5 run lib/isc/task.c:1344:2 + + Thread T1 (running) created by main thread at: + #0 pthread_create + #1 isc_thread_create lib/isc/pthreads/thread.c:73:8 + #2 isc_taskmgr_create lib/isc/task.c:1434:3 + #3 create_managers bin/named/main.c:915:11 + #4 setup bin/named/main.c:1223:11 + #5 main bin/named/main.c:1523:2 + + Thread T2 (running) created by main thread at: + #0 pthread_create + #1 isc_thread_create lib/isc/pthreads/thread.c:73:8 + #2 isc_taskmgr_create lib/isc/task.c:1434:3 + #3 create_managers bin/named/main.c:915:11 + #4 setup bin/named/main.c:1223:11 + #5 main bin/named/main.c:1523:2 + + SUMMARY: ThreadSanitizer: lock-order-inversion (potential deadlock) in pthread_rwlock_rdlock + +(cherry picked from commit c9dbad97b2f96fcfba3290fe52f6b044af64d780) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/38734d80b5ea2bf62a3bba56a42fea3e3a4a93f4 +--- + lib/dns/zone.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/lib/dns/zone.c b/lib/dns/zone.c +index 55eb1d72ec..b024034e63 100644 +--- a/lib/dns/zone.c ++++ b/lib/dns/zone.c +@@ -8645,6 +8645,8 @@ zone_sign(dns_zone_t *zone) { + + while (signing != NULL && nodes-- > 0 && signatures > 0) { + bool has_alg = false; ++ ++ dns_dbiterator_pause(signing->dbiterator); + nextsigning = ISC_LIST_NEXT(signing, link); + + ZONEDB_LOCK(&zone->dblock, isc_rwlocktype_read); +-- +2.23.0 + diff --git a/backport-0047-Pause-dbiterator-to-release-rwlock-to-prevent-lock-o.patch b/backport-0047-Pause-dbiterator-to-release-rwlock-to-prevent-lock-o.patch new file mode 100644 index 0000000..b798841 --- /dev/null +++ b/backport-0047-Pause-dbiterator-to-release-rwlock-to-prevent-lock-o.patch @@ -0,0 +1,80 @@ +From 215526caf4b32968cf407351fe48aed3bb2747e8 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Fri, 4 Sep 2020 16:40:51 +1000 +Subject: [PATCH] Pause dbiterator to release rwlock to prevent + lock-order-inversion. 
+ + WARNING: ThreadSanitizer: lock-order-inversion (potential deadlock) + Cycle in lock order graph: M1 (0x000000000000) => M2 (0x000000000001) => M1 + + Mutex M2 acquired here while holding mutex M1 in thread T1: + #0 pthread_rwlock_rdlock + #1 isc_rwlock_lock lib/isc/rwlock.c:48:3 + #2 getsigningtime lib/dns/rbtdb.c:8198:2 + #3 dns_db_getsigningtime lib/dns/db.c:979:11 + #4 set_resigntime lib/dns/zone.c:3887:11 + #5 dns_zone_markdirty lib/dns/zone.c:11119:4 + #6 update_action lib/ns/update.c:3376:3 + #7 dispatch lib/isc/task.c:1152:7 + #8 run lib/isc/task.c:1344:2 + + Mutex M1 previously acquired by the same thread here: + #0 pthread_mutex_lock + #1 dns_zone_markdirty lib/dns/zone.c:11089:2 + #2 update_action lib/ns/update.c:3376:3 + #3 dispatch lib/isc/task.c:1152:7 + #4 run lib/isc/task.c:1344:2 + + Mutex M1 acquired here while holding mutex M2 in thread T1: + #0 pthread_mutex_lock + #1 zone_nsec3chain lib/dns/zone.c:8502:3 + #2 zone_maintenance lib/dns/zone.c:11056:4 + #3 zone_timer lib/dns/zone.c:14091:2 + #4 dispatch lib/isc/task.c:1152:7 + #5 run lib/isc/task.c:1344:2 + + Mutex M2 previously acquired by the same thread here: + #0 pthread_rwlock_rdlock + #1 isc_rwlock_lock lib/isc/rwlock.c:48:3 + #2 resume_iteration lib/dns/rbtdb.c:9357:2 + #3 dbiterator_current lib/dns/rbtdb.c:9695:3 + #4 dns_dbiterator_current lib/dns/dbiterator.c:101:10 + #5 zone_nsec3chain lib/dns/zone.c:8539:3 + #6 zone_maintenance lib/dns/zone.c:11056:4 + #7 zone_timer lib/dns/zone.c:14091:2 + #8 dispatch lib/isc/task.c:1152:7 + #9 run lib/isc/task.c:1344:2 + + Thread T1 (running) created by main thread at: + #0 pthread_create + #1 isc_thread_create lib/isc/pthreads/thread.c:73:8 + #2 isc_taskmgr_create lib/isc/task.c:1434:3 + #3 create_managers bin/named/main.c:915:11 + #4 setup bin/named/main.c:1223:11 + #5 main bin/named/main.c:1523:2 + + SUMMARY: ThreadSanitizer: lock-order-inversion (potential deadlock) in pthread_rwlock_rdlock + +(cherry picked from commit fbed96220486a063aafdd0a6ada8adce972fd48f) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/215526caf4b32968cf407351fe48aed3bb2747e8 +--- + lib/dns/zone.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/lib/dns/zone.c b/lib/dns/zone.c +index b024034e63..25ed062994 100644 +--- a/lib/dns/zone.c ++++ b/lib/dns/zone.c +@@ -7939,6 +7939,8 @@ zone_nsec3chain(dns_zone_t *zone) { + first = true; + buildnsecchain = false; + while (nsec3chain != NULL && nodes-- > 0 && signatures > 0) { ++ dns_dbiterator_pause(nsec3chain->dbiterator); ++ + LOCK_ZONE(zone); + nextnsec3chain = ISC_LIST_NEXT(nsec3chain, link); + UNLOCK_ZONE(zone); +-- +2.23.0 + diff --git a/backport-0048-Pause-dbiterator-ealier-to-prevent-lock-order-invers.patch b/backport-0048-Pause-dbiterator-ealier-to-prevent-lock-order-invers.patch new file mode 100644 index 0000000..75148bc --- /dev/null +++ b/backport-0048-Pause-dbiterator-ealier-to-prevent-lock-order-invers.patch @@ -0,0 +1,75 @@ +From 267fe9a6b70d2d812f0ac6f19945d9488d10ba35 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Tue, 8 Sep 2020 13:42:07 +1000 +Subject: [PATCH] Pause dbiterator ealier to prevent lock-order-inversion + + WARNING: ThreadSanitizer: lock-order-inversion (potential deadlock) + Cycle in lock order graph: M1 (0x000000000000) => M2 (0x000000000000) => M1 + + Mutex M2 acquired here while holding mutex M1 in thread T1: + #0 pthread_rwlock_rdlock + #1 isc_rwlock_lock lib/isc/rwlock.c:48:3 + #2 findnodeintree lib/dns/rbtdb.c:2877:2 + #3 findnode lib/dns/rbtdb.c:2941:10 + #4 
dns_db_findnode lib/dns/db.c:439:11 + #5 resume_addnsec3chain lib/dns/zone.c:3776:11 + #6 rss_post lib/dns/zone.c:20659:3 + #7 setnsec3param lib/dns/zone.c:20471:3 + #8 dispatch lib/isc/task.c:1152:7 + #9 run lib/isc/task.c:1344:2 + + Mutex M1 previously acquired by the same thread here: + #0 pthread_mutex_lock + #1 rss_post lib/dns/zone.c:20658:3 + #2 setnsec3param lib/dns/zone.c:20471:3 + #3 dispatch lib/isc/task.c:1152:7 + #4 run lib/isc/task.c:1344:2 + + Mutex M1 acquired here while holding mutex M2 in thread T2: + #0 pthread_mutex_lock + #1 zone_nsec3chain lib/dns/zone.c:8666:5 + #2 zone_maintenance lib/dns/zone.c:11063:4 + #3 zone_timer lib/dns/zone.c:14098:2 + #4 dispatch lib/isc/task.c:1152:7 + #5 run lib/isc/task.c:1344:2 + + Mutex M2 previously acquired by the same thread here: + #0 pthread_rwlock_rdlock + #1 isc_rwlock_lock lib/isc/rwlock.c:48:3 + #2 resume_iteration lib/dns/rbtdb.c:9357:2 + #3 dbiterator_next lib/dns/rbtdb.c:9647:3 + #4 dns_dbiterator_next lib/dns/dbiterator.c:87:10 + #5 zone_nsec3chain lib/dns/zone.c:8656:13 + #6 zone_maintenance lib/dns/zone.c:11063:4 + #7 zone_timer lib/dns/zone.c:14098:2 + #8 dispatch lib/isc/task.c:1152:7 + #9 run lib/isc/task.c:1344:2 + +(cherry picked from commit 9e584a45114849637c0ab04e9410ba5fc00b054d) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/267fe9a6b70d2d812f0ac6f19945d9488d10ba35 +--- + lib/dns/zone.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/lib/dns/zone.c b/lib/dns/zone.c +index a81733f828..096ff82f34 100644 +--- a/lib/dns/zone.c ++++ b/lib/dns/zone.c +@@ -8093,12 +8093,12 @@ zone_nsec3chain(dns_zone_t *zone) { + goto same_removechain; + } + if (result == ISC_R_NOMORE) { ++ dns_dbiterator_pause(nsec3chain->dbiterator); + LOCK_ZONE(zone); + ISC_LIST_UNLINK(zone->nsec3chain, nsec3chain, + link); + UNLOCK_ZONE(zone); + ISC_LIST_APPEND(cleanup, nsec3chain, link); +- dns_dbiterator_pause(nsec3chain->dbiterator); + result = fixup_nsec3param(db, version, + nsec3chain, false, + privatetype, +-- +2.23.0 + diff --git a/backport-0049-Lock-access-to-control-symtab-to-prevent-data-race.patch b/backport-0049-Lock-access-to-control-symtab-to-prevent-data-race.patch new file mode 100644 index 0000000..4edaa4a --- /dev/null +++ b/backport-0049-Lock-access-to-control-symtab-to-prevent-data-race.patch @@ -0,0 +1,165 @@ +From 7247df0fcf1445e82b56a80d0e419aff701defde Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Tue, 8 Sep 2020 12:11:06 +1000 +Subject: [PATCH] Lock access to control->symtab to prevent data race + + WARNING: ThreadSanitizer: data race + Read of size 8 at 0x000000000001 by thread T1: + #0 isccc_symtab_foreach lib/isccc/symtab.c:277:14 + #1 isccc_cc_cleansymtab lib/isccc/cc.c:954:2 + #2 control_recvmessage bin/named/controlconf.c:477:2 + #3 recv_data lib/isccc/ccmsg.c:110:2 + #4 read_cb lib/isc/netmgr/tcp.c:769:4 + #5 + + Previous write of size 8 at 0x000000000001 by thread T2: + #0 isccc_symtab_define lib/isccc/symtab.c:242:2 + #1 isccc_cc_checkdup lib/isccc/cc.c:1026:11 + #2 control_recvmessage bin/named/controlconf.c:478:11 + #3 recv_data lib/isccc/ccmsg.c:110:2 + #4 read_cb lib/isc/netmgr/tcp.c:769:4 + #5 + + Location is heap block of size 190352 at 0x000000000011 allocated by main thread: + #0 malloc + #1 isccc_symtab_create lib/isccc/symtab.c:76:18 + #2 isccc_cc_createsymtab lib/isccc/cc.c:948:10 + #3 named_controls_create bin/named/controlconf.c:1483:11 + #4 named_server_create bin/named/server.c:10057:2 + #5 setup bin/named/main.c:1256:2 + #6 main 
bin/named/main.c:1523:2 + + Thread T1 (running) created by main thread at: + #0 pthread_create + #1 isc_thread_create lib/isc/pthreads/thread.c:73:8 + #2 isc_nm_start lib/isc/netmgr/netmgr.c:215:3 + #3 create_managers bin/named/main.c:909:15 + #4 setup bin/named/main.c:1223:11 + #5 main bin/named/main.c:1523:2 + + Thread T2 (running) created by main thread at: + #0 pthread_create + #1 isc_thread_create lib/isc/pthreads/thread.c:73:8 + #2 isc_nm_start lib/isc/netmgr/netmgr.c:215:3 + #3 create_managers bin/named/main.c:909:15 + #4 setup bin/named/main.c:1223:11 + #5 main bin/named/main.c:1523:2 + + SUMMARY: ThreadSanitizer: data race lib/isccc/symtab.c:277:14 in isccc_symtab_foreach + +(cherry picked from commit 0450acc1b65442a0e904c895cf2875eacf409598) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/7247df0fcf1445e82b56a80d0e419aff701defde +--- + bin/named/controlconf.c | 41 +++++++++++++++++++++++++++++++---------- + 1 file changed, 31 insertions(+), 10 deletions(-) + +diff --git a/bin/named/controlconf.c b/bin/named/controlconf.c +index 1d31f7a311..9fdf49bb7a 100644 +--- a/bin/named/controlconf.c ++++ b/bin/named/controlconf.c +@@ -21,6 +21,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -75,8 +76,8 @@ struct controlkey { + struct controlconnection { + isc_socket_t * sock; + isccc_ccmsg_t ccmsg; +- bool ccmsg_valid; +- bool sending; ++ bool ccmsg_valid; ++ bool sending; + isc_timer_t * timer; + isc_buffer_t * buffer; + controllistener_t * listener; +@@ -91,22 +92,23 @@ struct controllistener { + isc_sockaddr_t address; + isc_socket_t * sock; + dns_acl_t * acl; +- bool listening; +- bool exiting; ++ bool listening; ++ bool exiting; + controlkeylist_t keys; + controlconnectionlist_t connections; + isc_sockettype_t type; + uint32_t perm; + uint32_t owner; + uint32_t group; +- bool readonly; ++ bool readonly; + ISC_LINK(controllistener_t) link; + }; + + struct ns_controls { + ns_server_t *server; + controllistenerlist_t listeners; +- bool shuttingdown; ++ bool shuttingdown; ++ isc_mutex_t symtab_lock; + isccc_symtab_t *symtab; + }; + +@@ -434,8 +436,10 @@ control_recvmessage(isc_task_t *task, isc_event_t *event) { + /* + * Duplicate suppression (required for UDP). 
+ */ ++ LOCK(&listener->controls->symtab_lock); + isccc_cc_cleansymtab(listener->controls->symtab, now); + result = isccc_cc_checkdup(listener->controls->symtab, request, now); ++ UNLOCK(&listener->controls->symtab_lock); + if (result != ISC_R_SUCCESS) { + if (result == ISC_R_EXISTS) + result = ISCCC_R_DUPLICATE; +@@ -1503,14 +1507,28 @@ ns_controls_create(ns_server_t *server, ns_controls_t **ctrlsp) { + isc_result_t result; + ns_controls_t *controls = isc_mem_get(mctx, sizeof(*controls)); + +- if (controls == NULL) ++ if (controls == NULL) { + return (ISC_R_NOMEMORY); +- controls->server = server; ++ } ++ ++ *controls = (ns_controls_t){ ++ .server = server, ++ }; ++ + ISC_LIST_INIT(controls->listeners); +- controls->shuttingdown = false; +- controls->symtab = NULL; ++ ++ result = isc_mutex_init(&controls->symtab_lock); ++ if (result != ISC_R_SUCCESS) { ++ isc_mem_put(server->mctx, controls, sizeof(*controls)); ++ return (result); ++ } ++ ++ LOCK(&controls->symtab_lock); + result = isccc_cc_createsymtab(&controls->symtab); ++ UNLOCK(&controls->symtab_lock); ++ + if (result != ISC_R_SUCCESS) { ++ isc_mutex_destroy(&controls->symtab_lock); + isc_mem_put(server->mctx, controls, sizeof(*controls)); + return (result); + } +@@ -1524,7 +1542,10 @@ ns_controls_destroy(ns_controls_t **ctrlsp) { + + REQUIRE(ISC_LIST_EMPTY(controls->listeners)); + ++ LOCK(&controls->symtab_lock); + isccc_symtab_destroy(&controls->symtab); ++ UNLOCK(&controls->symtab_lock); ++ isc_mutex_destroy(&controls->symtab_lock); + isc_mem_put(controls->server->mctx, controls, sizeof(*controls)); + *ctrlsp = NULL; + } +-- +2.23.0 + diff --git a/backport-0050-Address-lock-order-inversion.patch b/backport-0050-Address-lock-order-inversion.patch new file mode 100644 index 0000000..317840a --- /dev/null +++ b/backport-0050-Address-lock-order-inversion.patch @@ -0,0 +1,106 @@ +From cd54ac9abe8d751373ecff64ee79d7b942a5be4d Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Tue, 22 Sep 2020 16:24:06 +1000 +Subject: [PATCH] Address lock-order-inversion + + WARNING: ThreadSanitizer: lock-order-inversion (potential deadlock) + Cycle in lock order graph: M1 (0x000000000001) => M2 (0x000000000002) => M1 + + Mutex M2 acquired here while holding mutex M1 in thread T1: + #0 pthread_rwlock_wrlock + #1 isc_rwlock_lock lib/isc/rwlock.c:52:4 + #2 zone_postload lib/dns/zone.c:5101:2 + #3 receive_secure_db lib/dns/zone.c:16206:11 + #4 dispatch lib/isc/task.c:1152:7 + #5 run lib/isc/task.c:1344:2 + + Mutex M1 previously acquired by the same thread here: + #0 pthread_mutex_lock + #1 receive_secure_db lib/dns/zone.c:16204:2 + #2 dispatch lib/isc/task.c:1152:7 + #3 run lib/isc/task.c:1344:2 + + Mutex M1 acquired here while holding mutex M2 in thread T1: + #0 pthread_mutex_lock + #1 get_raw_serial lib/dns/zone.c:2518:2 + #2 zone_gotwritehandle lib/dns/zone.c:2559:4 + #3 dispatch lib/isc/task.c:1152:7 + #4 run lib/isc/task.c:1344:2 + + Mutex M2 previously acquired by the same thread here: + #0 pthread_rwlock_rdlock + #1 isc_rwlock_lock lib/isc/rwlock.c:48:3 + #2 zone_gotwritehandle lib/dns/zone.c:2552:2 + #3 dispatch lib/isc/task.c:1152:7 + #4 run lib/isc/task.c:1344:2 + + Thread T1 (running) created by main thread at: + #0 pthread_create + #1 isc_thread_create lib/isc/pthreads/thread.c:73:8 + #2 isc_taskmgr_create lib/isc/task.c:1434:3 + #3 create_managers bin/named/main.c:915:11 + #4 setup bin/named/main.c:1223:11 + #5 main bin/named/main.c:1523:2 + + SUMMARY: ThreadSanitizer: lock-order-inversion (potential deadlock) in pthread_rwlock_wrlock + 
+(cherry picked from commit 1090876693470eedf69211d0fe71ba2c88160f45) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/cd54ac9abe8d751373ecff64ee79d7b942a5be4d +--- + lib/dns/zone.c | 19 +++++++++++++------ + 1 file changed, 13 insertions(+), 6 deletions(-) + +diff --git a/lib/dns/zone.c b/lib/dns/zone.c +index 096ff82f34..fd634af65f 100644 +--- a/lib/dns/zone.c ++++ b/lib/dns/zone.c +@@ -2468,6 +2468,7 @@ zone_gotwritehandle(isc_task_t *task, isc_event_t *event) { + isc_result_t result = ISC_R_SUCCESS; + dns_dbversion_t *version = NULL; + dns_masterrawheader_t rawdata; ++ dns_db_t *db = NULL; + + REQUIRE(DNS_ZONE_VALID(zone)); + INSIST(task == zone->task); +@@ -2483,9 +2484,12 @@ zone_gotwritehandle(isc_task_t *task, isc_event_t *event) { + INSIST(zone != zone->raw); + ZONEDB_LOCK(&zone->dblock, isc_rwlocktype_read); + if (zone->db != NULL) { ++ dns_db_attach(zone->db, &db); ++ } ++ ZONEDB_UNLOCK(&zone->dblock, isc_rwlocktype_read); ++ if (db != NULL) { + const dns_master_style_t *output_style; +- +- dns_db_currentversion(zone->db, &version); ++ dns_db_currentversion(db, &version); + dns_master_initrawheader(&rawdata); + if (inline_secure(zone)) + get_raw_serial(zone->raw, &rawdata); +@@ -2495,15 +2499,18 @@ zone_gotwritehandle(isc_task_t *task, isc_event_t *event) { + output_style = zone->masterstyle; + else + output_style = &dns_master_style_default; +- result = dns_master_dumpinc3(zone->mctx, zone->db, version, ++ result = dns_master_dumpinc3(zone->mctx, db, version, + output_style, zone->masterfile, + zone->task, dump_done, zone, + &zone->dctx, zone->masterformat, + &rawdata); +- dns_db_closeversion(zone->db, &version, false); +- } else ++ dns_db_closeversion(db, &version, false); ++ } else { + result = ISC_R_CANCELED; +- ZONEDB_UNLOCK(&zone->dblock, isc_rwlocktype_read); ++ } ++ if (db != NULL) { ++ dns_db_detach(&db); ++ } + UNLOCK_ZONE(zone); + if (result != DNS_R_CONTINUE) + goto fail; +-- +2.23.0 + diff --git a/backport-0051-Break-lock-order-loop-by-sending-TAT-in-an-event.patch b/backport-0051-Break-lock-order-loop-by-sending-TAT-in-an-event.patch new file mode 100644 index 0000000..3d2c91d --- /dev/null +++ b/backport-0051-Break-lock-order-loop-by-sending-TAT-in-an-event.patch @@ -0,0 +1,283 @@ +From d95a18711bd871651555b7a0a6914229341e0b84 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Tue, 22 Sep 2020 15:22:34 +1000 +Subject: [PATCH] Break lock order loop by sending TAT in an event + +The dotat() function has been changed to send the TAT +query asynchronously, so there's no lock order loop +because we initialize the data first and then we schedule +the TAT send to happen asynchronously. 
+ +This breaks following lock-order loops: + +zone->lock (dns_zone_setviewcommit) while holding view->lock +(dns_view_setviewcommit) + +keytable->lock (dns_keytable_find) while holding zone->lock +(zone_asyncload) + +view->lock (dns_view_findzonecut) while holding keytable->lock +(dns_keytable_forall) + +(cherry picked from commit 3c4b68af7c0cd8213bcae92faee3bf2a7e9284d1) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/d95a18711bd871651555b7a0a6914229341e0b84 +--- + bin/named/include/named/server.h | 1 + + bin/named/server.c | 139 ++++++++++++++++++++----------- + 2 files changed, 93 insertions(+), 47 deletions(-) + +diff --git a/bin/named/include/named/server.h b/bin/named/include/named/server.h +index b2537e7aa0..4fd0194a54 100644 +--- a/bin/named/include/named/server.h ++++ b/bin/named/include/named/server.h +@@ -34,6 +34,7 @@ + #define NS_EVENT_RELOAD (NS_EVENTCLASS + 0) + #define NS_EVENT_CLIENTCONTROL (NS_EVENTCLASS + 1) + #define NS_EVENT_DELZONE (NS_EVENTCLASS + 2) ++#define NS_EVENT_TATSEND (NS_EVENTCLASS + 3) + + /*% + * Name server state. Better here than in lots of separate global variables. +diff --git a/bin/named/server.c b/bin/named/server.c +index 638625f171..30d38beb40 100644 +--- a/bin/named/server.c ++++ b/bin/named/server.c +@@ -6094,11 +6094,14 @@ heartbeat_timer_tick(isc_task_t *task, isc_event_t *event) { + } + + typedef struct { +- isc_mem_t *mctx; +- isc_task_t *task; +- dns_rdataset_t rdataset; +- dns_rdataset_t sigrdataset; +- dns_fetch_t *fetch; ++ isc_mem_t *mctx; ++ isc_task_t *task; ++ dns_fetch_t *fetch; ++ dns_view_t *view; ++ dns_fixedname_t tatname; ++ dns_fixedname_t keyname; ++ dns_rdataset_t rdataset; ++ dns_rdataset_t sigrdataset; + } ns_tat_t; + + static int +@@ -6118,10 +6121,11 @@ tat_done(isc_task_t *task, isc_event_t *event) { + dns_fetchevent_t *devent; + ns_tat_t *tat; + +- UNUSED(task); + INSIST(event != NULL && event->ev_type == DNS_EVENT_FETCHDONE); + INSIST(event->ev_arg != NULL); + ++ UNUSED(task); ++ + tat = event->ev_arg; + devent = (dns_fetchevent_t *) event; + +@@ -6136,6 +6140,7 @@ tat_done(isc_task_t *task, isc_event_t *event) { + dns_rdataset_disassociate(&tat->rdataset); + if (dns_rdataset_isassociated(&tat->sigrdataset)) + dns_rdataset_disassociate(&tat->sigrdataset); ++ dns_view_detach(&tat->view); + isc_task_detach(&tat->task); + isc_mem_putanddetach(&tat->mctx, tat, sizeof(*tat)); + } +@@ -6148,7 +6153,7 @@ struct dotat_arg { + /*% + * Prepare the QNAME for the TAT query to be sent by processing the trust + * anchors present at 'keynode' of 'keytable'. Store the result in 'dst' and +- * the domain name which 'keynode' is associated with in 'origin'. ++ * the domain name which 'keynode' is associated with in 'keyname'. + * + * A maximum of 12 key IDs can be reported in a single TAT query due to the + * 63-octet length limit for any single label in a domain name. If there are +@@ -6156,7 +6161,7 @@ struct dotat_arg { + * reported in the TAT query. 
+ */ + static isc_result_t +-get_tat_qname(dns_name_t *dst, dns_name_t **origin, dns_keytable_t *keytable, ++get_tat_qname(dns_name_t *dst, dns_name_t **keyname, dns_keytable_t *keytable, + dns_keynode_t *keynode) + { + dns_keynode_t *firstnode = keynode; +@@ -6167,12 +6172,12 @@ get_tat_qname(dns_name_t *dst, dns_name_t **origin, dns_keytable_t *keytable, + char label[64]; + int m; + +- REQUIRE(origin != NULL && *origin == NULL); ++ REQUIRE(keyname != NULL && *keyname == NULL); + + do { + dst_key_t *key = dns_keynode_key(keynode); + if (key != NULL) { +- *origin = dst_key_name(key); ++ *keyname = dst_key_name(key); + if (n < (sizeof(ids)/sizeof(ids[0]))) { + ids[n] = dst_key_id(key); + n++; +@@ -6212,52 +6217,35 @@ get_tat_qname(dns_name_t *dst, dns_name_t **origin, dns_keytable_t *keytable, + isc_textregion_consume(&r, m); + } + +- return (dns_name_fromstring2(dst, label, *origin, 0, NULL)); ++ return (dns_name_fromstring2(dst, label, *keyname, 0, NULL)); + } + + static void +-dotat(dns_keytable_t *keytable, dns_keynode_t *keynode, void *arg) { +- struct dotat_arg *dotat_arg = arg; ++tat_send(isc_task_t *task, isc_event_t *event) { ++ ns_tat_t *tat; + char namebuf[DNS_NAME_FORMATSIZE]; +- dns_fixedname_t fixed, fdomain; +- dns_name_t *tatname, *domain; ++ dns_fixedname_t fdomain; ++ dns_name_t *domain; + dns_rdataset_t nameservers; +- dns_name_t *origin = NULL; + isc_result_t result; +- dns_view_t *view; +- isc_task_t *task; +- ns_tat_t *tat; ++ dns_name_t *keyname; ++ dns_name_t *tatname; + +- REQUIRE(keytable != NULL); +- REQUIRE(keynode != NULL); +- REQUIRE(dotat_arg != NULL); ++ INSIST(event != NULL && event->ev_type == NS_EVENT_TATSEND); ++ INSIST(event->ev_arg != NULL); + +- view = dotat_arg->view; +- task = dotat_arg->task; ++ UNUSED(task); + +- tatname = dns_fixedname_initname(&fixed); +- result = get_tat_qname(tatname, &origin, keytable, keynode); +- if (result != ISC_R_SUCCESS) { +- return; +- } ++ tat = event->ev_arg; ++ ++ tatname = dns_fixedname_name(&tat->tatname); ++ keyname = dns_fixedname_name(&tat->keyname); + + dns_name_format(tatname, namebuf, sizeof(namebuf)); + isc_log_write(ns_g_lctx, NS_LOGCATEGORY_GENERAL, NS_LOGMODULE_SERVER, + ISC_LOG_INFO, +- "%s: sending trust-anchor-telemetry query '%s/NULL'", +- view->name, namebuf); +- +- tat = isc_mem_get(dotat_arg->view->mctx, sizeof(*tat)); +- if (tat == NULL) +- return; +- +- tat->mctx = NULL; +- tat->task = NULL; +- tat->fetch = NULL; +- dns_rdataset_init(&tat->rdataset); +- dns_rdataset_init(&tat->sigrdataset); +- isc_mem_attach(dotat_arg->view->mctx, &tat->mctx); +- isc_task_attach(task, &tat->task); ++ "%s: sending trust-anchor-telemetry query '%s/NULL'", ++ tat->view->name, namebuf); + + /* + * TAT queries should be sent to the authoritative servers for a given +@@ -6276,20 +6264,20 @@ dotat(dns_keytable_t *keytable, dns_keynode_t *keynode, void *arg) { + * order to eventually find the destination host to send the TAT query + * to. + * +- * 'origin' holds the domain name at 'keynode', i.e. the domain name ++ * 'keyname' holds the domain name at 'keynode', i.e. the domain name + * for which the trust anchors to be reported by this TAT query are + * defined. + * + * After the dns_view_findzonecut() call, 'domain' will hold the +- * deepest zone cut we can find for 'origin' while 'nameservers' will ++ * deepest zone cut we can find for 'keyname' while 'nameservers' will + * hold the NS RRset at that zone cut. 
+ */ + domain = dns_fixedname_initname(&fdomain); + dns_rdataset_init(&nameservers); +- result = dns_view_findzonecut(view, origin, domain, 0, 0, true, ++ result = dns_view_findzonecut(tat->view, keyname, domain, 0, 0, true, + &nameservers, NULL); + if (result == ISC_R_SUCCESS) { +- result = dns_resolver_createfetch(view->resolver, tatname, ++ result = dns_resolver_createfetch(tat->view->resolver, tatname, + dns_rdatatype_null, domain, + &nameservers, NULL, 0, + tat->task, tat_done, tat, +@@ -6314,9 +6302,66 @@ dotat(dns_keytable_t *keytable, dns_keynode_t *keynode, void *arg) { + } + + if (result != ISC_R_SUCCESS) { ++ dns_view_detach(&tat->view); + isc_task_detach(&tat->task); + isc_mem_putanddetach(&tat->mctx, tat, sizeof(*tat)); + } ++ isc_event_free(&event); ++} ++ ++static void ++dotat(dns_keytable_t *keytable, dns_keynode_t *keynode, void *arg) { ++ struct dotat_arg *dotat_arg = arg; ++ dns_name_t *keyname = NULL; ++ isc_result_t result; ++ dns_view_t *view; ++ isc_task_t *task; ++ ns_tat_t *tat; ++ isc_event_t *event; ++ ++ REQUIRE(keytable != NULL); ++ REQUIRE(keynode != NULL); ++ REQUIRE(dotat_arg != NULL); ++ ++ view = dotat_arg->view; ++ task = dotat_arg->task; ++ ++ tat = isc_mem_get(dotat_arg->view->mctx, sizeof(*tat)); ++ ++ tat->fetch = NULL; ++ tat->mctx = NULL; ++ tat->task = NULL; ++ tat->view = NULL; ++ dns_rdataset_init(&tat->rdataset); ++ dns_rdataset_init(&tat->sigrdataset); ++ result = get_tat_qname(dns_fixedname_initname(&tat->tatname), &keyname, ++ keytable, keynode); ++ if (result != ISC_R_SUCCESS) { ++ isc_mem_put(dotat_arg->view->mctx, tat, sizeof(*tat)); ++ return; ++ } ++ dns_name_copy(keyname, dns_fixedname_initname(&tat->keyname), NULL); ++ isc_mem_attach(dotat_arg->view->mctx, &tat->mctx); ++ isc_task_attach(task, &tat->task); ++ dns_view_attach(view, &tat->view); ++ ++ /* ++ * We don't want to be holding the keytable lock when calling ++ * dns_view_findzonecut() as it creates a lock order loop so ++ * call dns_view_findzonecut() in a event handler. ++ * ++ * zone->lock (dns_zone_setviewcommit) while holding view->lock ++ * (dns_view_setviewcommit) ++ * ++ * keytable->lock (dns_keytable_find) while holding zone->lock ++ * (zone_asyncload) ++ * ++ * view->lock (dns_view_findzonecut) while holding keytable->lock ++ * (dns_keytable_forall) ++ */ ++ event = isc_event_allocate(tat->mctx, keytable, NS_EVENT_TATSEND, ++ tat_send, tat, sizeof(isc_event_t)); ++ isc_task_send(task, &event); + } + + static void +-- +2.23.0 + diff --git a/backport-0052-Handle-DNS_R_NCACHENXRRSET-in-fetch_callback_-dnskey.patch b/backport-0052-Handle-DNS_R_NCACHENXRRSET-in-fetch_callback_-dnskey.patch new file mode 100644 index 0000000..a55bf41 --- /dev/null +++ b/backport-0052-Handle-DNS_R_NCACHENXRRSET-in-fetch_callback_-dnskey.patch @@ -0,0 +1,55 @@ +From 4a4605fbefd74ae8417f4601950e313ea2977eba Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Wed, 28 Oct 2020 11:58:38 +1100 +Subject: [PATCH] Handle DNS_R_NCACHENXRRSET in + fetch_callback_{dnskey,validator}() + +DNS_R_NCACHENXRRSET can be return when zones are in transition state +from being unsigned to signed and signed to unsigned. The validation +should be resumed and should result in a insecure answer. 
+ +(cherry picked from commit 718e597def1daaae7edf9b151f6b24e0acc5c87a) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/4a4605fbefd74ae8417f4601950e313ea2977eba +--- + lib/dns/validator.c | 19 +++++++++++++------ + 1 file changed, 13 insertions(+), 6 deletions(-) + +diff --git a/lib/dns/validator.c b/lib/dns/validator.c +index 1605261a48..9d966f7bb1 100644 +--- a/lib/dns/validator.c ++++ b/lib/dns/validator.c +@@ -418,17 +418,24 @@ fetch_callback_validator(isc_task_t *task, isc_event_t *event) { + val->fetch = NULL; + if (CANCELED(val)) { + validator_done(val, ISC_R_CANCELED); +- } else if (eresult == ISC_R_SUCCESS) { +- validator_log(val, ISC_LOG_DEBUG(3), +- "keyset with trust %s", ++ } else if (eresult == ISC_R_SUCCESS || eresult == DNS_R_NCACHENXRRSET) { ++ /* ++ * We have an answer to our DNSKEY query. Either the DNSKEY ++ * RRset or a NODATA response. ++ */ ++ validator_log(val, ISC_LOG_DEBUG(3), "%s with trust %s", ++ eresult == ISC_R_SUCCESS ? "keyset" ++ : "NCACHENXRRSET", + dns_trust_totext(rdataset->trust)); + /* +- * Only extract the dst key if the keyset is secure. ++ * Only extract the dst key if the keyset exists and is secure. + */ +- if (rdataset->trust >= dns_trust_secure) { ++ if (eresult == ISC_R_SUCCESS && ++ rdataset->trust >= dns_trust_secure) { + result = get_dst_key(val, val->siginfo, rdataset); +- if (result == ISC_R_SUCCESS) ++ if (result == ISC_R_SUCCESS) { + val->keyset = &val->frdataset; ++ } + } + result = validate(val, true); + if (result == DNS_R_NOVALIDSIG && +-- +2.23.0 + diff --git a/backport-0053-Lock-read-of-refs-when-atomics-are-not-available.patch b/backport-0053-Lock-read-of-refs-when-atomics-are-not-available.patch new file mode 100644 index 0000000..84c98a3 --- /dev/null +++ b/backport-0053-Lock-read-of-refs-when-atomics-are-not-available.patch @@ -0,0 +1,68 @@ +From a241c69920fe5a609305fb435b593c2d997626aa Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Thu, 15 Oct 2020 15:05:30 +1100 +Subject: [PATCH] Lock read of refs when atomics are not available. 
+ + WARNING: ThreadSanitizer: data race + Read of size 4 at 0x000000000001 by thread T1 (mutexes: write M1): + #0 zone_iattach lib/dns/zone.c:5412:2 + #1 soa_query lib/dns/zone.c:12725:2 + #2 dispatch lib/isc/task.c:1157:7 + #3 run lib/isc/task.c:1331:2 + + Previous write of size 4 at 0x000000000001 by thread T2 (mutexes: write M2): + #0 dns_zone_detach lib/dns/zone.c:5346:2 + #1 ns_server_refreshcommand bin/named/./server.c:9880:3 + #2 ns_control_docommand bin/named/control.c:247:12 + #3 control_recvmessage bin/named/controlconf.c:469:13 + #4 dispatch lib/isc/task.c:1157:7 + #5 run lib/isc/task.c:1331:2 +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/a241c69920fe5a609305fb435b593c2d997626aa +--- + lib/isc/include/isc/refcount.h | 2 +- + lib/isc/refcount.c | 15 +++++++++++++++ + 2 files changed, 16 insertions(+), 1 deletion(-) + +diff --git a/lib/isc/include/isc/refcount.h b/lib/isc/include/isc/refcount.h +index 5d1a5d2bbf..1c9696fc6b 100644 +--- a/lib/isc/include/isc/refcount.h ++++ b/lib/isc/include/isc/refcount.h +@@ -211,7 +211,7 @@ typedef struct isc_refcount { + ISC_ERROR_RUNTIMECHECK(_result == ISC_R_SUCCESS); \ + } while (0) + +-#define isc_refcount_current(rp) ((unsigned int)((rp)->refs)) ++unsigned int isc_refcount_current(isc_refcount_t *rp); + + /*% + * Increments the reference count, returning the new value in +diff --git a/lib/isc/refcount.c b/lib/isc/refcount.c +index d83c82fe7f..0d961effe1 100644 +--- a/lib/isc/refcount.c ++++ b/lib/isc/refcount.c +@@ -19,6 +19,21 @@ + #include + #include + ++#if defined(ISC_PLATFORM_USETHREADS) && !defined(ISC_REFCOUNT_HAVEATOMIC) ++unsigned int ++isc_refcount_current(isc_refcount_t *ref) { ++ isc_result_t result; ++ unsigned int answer; ++ ++ result = isc_mutex_lock(&ref->lock); ++ ISC_ERROR_RUNTIMECHECK(result == ISC_R_SUCCESS); ++ answer = ref->refs; ++ result = isc_mutex_unlock(&ref->lock); ++ ISC_ERROR_RUNTIMECHECK(result == ISC_R_SUCCESS); ++ return (answer); ++} ++#endif ++ + isc_result_t + isc_refcount_init(isc_refcount_t *ref, unsigned int n) { + REQUIRE(ref != NULL); +-- +2.23.0 + diff --git a/backport-0054-Inactive-incorrectly-incremented.patch b/backport-0054-Inactive-incorrectly-incremented.patch new file mode 100644 index 0000000..5217e21 --- /dev/null +++ b/backport-0054-Inactive-incorrectly-incremented.patch @@ -0,0 +1,39 @@ +From 25150c15e7cfa73289f04470e2e699ebb7c28fef Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Fri, 18 Dec 2020 13:31:07 +1100 +Subject: [PATCH] Inactive incorrectly incremented + +It is possible to have two threads destroying an rbtdb at the same +time when detachnode() executes and removes the last reference to +a node between exiting being set to true for the node and testing +if the references are zero in maybe_free_rbtdb(). Move NODE_UNLOCK() +to after checking if references is zero to prevent detachnode() +changing the reference count too early. 
+ +(cherry picked from commit 859d2fdad6d1c6ff20083a4c463a929cbeb26438) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/25150c15e7cfa73289f04470e2e699ebb7c28fef +--- + lib/dns/rbtdb.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/lib/dns/rbtdb.c b/lib/dns/rbtdb.c +index c60117c5ee..34fc404b2e 100644 +--- a/lib/dns/rbtdb.c ++++ b/lib/dns/rbtdb.c +@@ -1453,11 +1453,11 @@ maybe_free_rbtdb(dns_rbtdb_t *rbtdb) { + for (i = 0; i < rbtdb->node_lock_count; i++) { + NODE_LOCK(&rbtdb->node_locks[i].lock, isc_rwlocktype_write); + rbtdb->node_locks[i].exiting = true; +- NODE_UNLOCK(&rbtdb->node_locks[i].lock, isc_rwlocktype_write); + if (isc_refcount_current(&rbtdb->node_locks[i].references) + == 0) { + inactive++; + } ++ NODE_UNLOCK(&rbtdb->node_locks[i].lock, isc_rwlocktype_write); + } + + if (inactive != 0) { +-- +2.23.0 + diff --git a/backport-0055-Resolve-TSAN-data-race-in-zone_maintenance.patch b/backport-0055-Resolve-TSAN-data-race-in-zone_maintenance.patch new file mode 100644 index 0000000..2dbccbb --- /dev/null +++ b/backport-0055-Resolve-TSAN-data-race-in-zone_maintenance.patch @@ -0,0 +1,55 @@ +From e2e6fd4fa09494c703774c3adb838bfca79b899b Mon Sep 17 00:00:00 2001 +From: Diego Fronza +Date: Wed, 7 Apr 2021 10:48:12 -0300 +Subject: [PATCH] Resolve TSAN data race in zone_maintenance + +Fix race between zone_maintenance and dns_zone_notifyreceive functions, +zone_maintenance was attempting to read a zone flag calling +DNS_ZONE_FLAG(zone, flag) while dns_zone_notifyreceive was updating +a flag in the same zone calling DNS_ZONE_SETFLAG(zone, ...). + +The code reading the flag in zone_maintenance was not protected by the +zone's lock, to avoid a race the zone's lock is now being acquired +before an attempt to read the zone flag is made. +Conflict: delete start_refresh +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/e2e6fd4fa09494c703774c3adb838bfca79b899b +--- + lib/dns/zone.c | 14 ++++++++++---- + 1 file changed, 10 insertions(+), 4 deletions(-) + +diff --git a/lib/dns/zone.c b/lib/dns/zone.c +index a895b25434..9866f85184 100644 +--- a/lib/dns/zone.c ++++ b/lib/dns/zone.c +@@ -10187,6 +10187,7 @@ zone_maintenance(dns_zone_t *zone) { + isc_time_t now; + isc_result_t result; + bool dumping, load_pending, viewok; ++ bool need_notify; + + REQUIRE(DNS_ZONE_VALID(zone)); + ENTER; +@@ -10268,11 +10269,16 @@ zone_maintenance(dns_zone_t *zone) { + /* + * Slaves send notifies before backing up to disk, masters after. + */ +- if (zone->type == dns_zone_slave && +- (DNS_ZONE_FLAG(zone, DNS_ZONEFLG_NEEDNOTIFY) || +- DNS_ZONE_FLAG(zone, DNS_ZONEFLG_NEEDSTARTUPNOTIFY)) && +- isc_time_compare(&now, &zone->notifytime) >= 0) ++ LOCK_ZONE(zone); ++ need_notify = zone->type == dns_zone_slave && ++ (DNS_ZONE_FLAG(zone, DNS_ZONEFLG_NEEDNOTIFY) || ++ DNS_ZONE_FLAG(zone, DNS_ZONEFLG_NEEDSTARTUPNOTIFY)) && ++ (isc_time_compare(&now, &zone->notifytime) >= 0); ++ UNLOCK_ZONE(zone); ++ ++ if (need_notify) { + zone_notify(zone, &now); ++ } + + /* + * Do we need to consolidate the backing store? 
+-- +2.23.0 + diff --git a/backport-0056-Free-resources-when-gss_accept_sec_context-fails.patch b/backport-0056-Free-resources-when-gss_accept_sec_context-fails.patch new file mode 100644 index 0000000..dda3762 --- /dev/null +++ b/backport-0056-Free-resources-when-gss_accept_sec_context-fails.patch @@ -0,0 +1,34 @@ +From dc1ed887760fc64d65034eb38b2066dc5bf54e04 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Micha=C5=82=20K=C4=99pie=C5=84?= +Date: Thu, 8 Apr 2021 10:33:44 +0200 +Subject: [PATCH] Free resources when gss_accept_sec_context() fails + +Even if a call to gss_accept_sec_context() fails, it might still cause a +GSS-API response token to be allocated and left for the caller to +release. Make sure the token is released before an early return from +dst_gssapi_acceptctx(). + +(cherry picked from commit d954e152d9f2901118b1fe36d3931ec244317fab) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/dc1ed887760fc64d65034eb38b2066dc5bf54e04 +--- + lib/dns/gssapictx.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/lib/dns/gssapictx.c b/lib/dns/gssapictx.c +index 549bd47f78..482c25e1cc 100644 +--- a/lib/dns/gssapictx.c ++++ b/lib/dns/gssapictx.c +@@ -715,6 +715,9 @@ dst_gssapi_acceptctx(gss_cred_id_t cred, + default: + gss_log(3, "failed gss_accept_sec_context: %s", + gss_error_tostring(gret, minor, buf, sizeof(buf))); ++ if (gouttoken.length > 0U) { ++ (void)gss_release_buffer(&minor, &gouttoken); ++ } + return (result); + } + +-- +2.23.0 + diff --git a/backport-0057-Unload-a-zone-if-a-transfer-breaks-its-SOA-record.patch b/backport-0057-Unload-a-zone-if-a-transfer-breaks-its-SOA-record.patch new file mode 100644 index 0000000..bdfdafb --- /dev/null +++ b/backport-0057-Unload-a-zone-if-a-transfer-breaks-its-SOA-record.patch @@ -0,0 +1,46 @@ +From d7011a55d3e7f06fbbb764678ef204e4e51d6f8b Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Thu, 25 Feb 2021 14:11:05 +1100 +Subject: [PATCH] Unload a zone if a transfer breaks its SOA record + +If a zone transfer results in a zone not having any NS records, named +stops serving it because such a zone is broken. Do the same if an +incoming zone transfer results in a zone lacking an SOA record at the +apex or containing more than one SOA record. +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/d7011a55d3e7f06fbbb764678ef204e4e51d6f8b +--- + lib/dns/zone.c | 15 ++++++++++++--- + 1 file changed, 12 insertions(+), 3 deletions(-) + +diff --git a/lib/dns/zone.c b/lib/dns/zone.c +index 9866f85184..0797d004ae 100644 +--- a/lib/dns/zone.c ++++ b/lib/dns/zone.c +@@ -15987,11 +15987,20 @@ zone_xfrdone(dns_zone_t *zone, isc_result_t result) { + &retry, &expire, &minimum, NULL); + ZONEDB_UNLOCK(&zone->dblock, isc_rwlocktype_read); + if (result == ISC_R_SUCCESS) { +- if (soacount != 1) ++ if (soacount != 1) { + dns_zone_log(zone, ISC_LOG_ERROR, + "transferred zone " +- "has %d SOA record%s", soacount, +- (soacount != 0) ? 
"s" : ""); ++ "has %d SOA records", ++ soacount); ++ if (DNS_ZONE_FLAG(zone, ++ DNS_ZONEFLG_HAVETIMERS)) { ++ zone->refresh = DNS_ZONE_DEFAULTREFRESH; ++ zone->retry = DNS_ZONE_DEFAULTRETRY; ++ } ++ DNS_ZONE_CLRFLAG(zone, DNS_ZONEFLG_HAVETIMERS); ++ zone_unload(zone); ++ goto next_master; ++ } + if (nscount == 0) { + dns_zone_log(zone, ISC_LOG_ERROR, + "transferred zone " +-- +2.23.0 + diff --git a/backport-0058-Address-inconsistencies-in-checking-added-RRsets.patch b/backport-0058-Address-inconsistencies-in-checking-added-RRsets.patch new file mode 100644 index 0000000..0843b4b --- /dev/null +++ b/backport-0058-Address-inconsistencies-in-checking-added-RRsets.patch @@ -0,0 +1,34 @@ +From 0313ede6e113f5d61fa8182340a964502c9f1954 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Fri, 12 Feb 2021 14:51:28 +1100 +Subject: [PATCH] Address inconsistencies in checking added RRsets + +loading_addrdataset() rejects SOA RRsets which are not at top of zone. +addrdataset() should similarly reject such RRsets. +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/0313ede6e113f5d61fa8182340a964502c9f1954 +--- + lib/dns/rbtdb.c | 7 +++++++ + 1 file changed, 7 insertions(+) + +diff --git a/lib/dns/rbtdb.c b/lib/dns/rbtdb.c +index 34fc404b2e..9d27a1519f 100644 +--- a/lib/dns/rbtdb.c ++++ b/lib/dns/rbtdb.c +@@ -6839,6 +6839,13 @@ addrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version, + INSIST(rbtversion == NULL || rbtversion->rbtdb == rbtdb); + + if (rbtdb->common.methods == &zone_methods) { ++ /* ++ * SOA records are only allowed at top of zone. ++ */ ++ if (rdataset->type == dns_rdatatype_soa && ++ node != rbtdb->origin_node) { ++ return (DNS_R_NOTZONETOP); ++ } + RWLOCK(&rbtdb->tree_lock, isc_rwlocktype_read); + REQUIRE(((rbtnode->nsec == DNS_RBT_NSEC_NSEC3 && + (rdataset->type == dns_rdatatype_nsec3 || +-- +2.23.0 + diff --git a/backport-0059-dns_rdata_tostruct-should-reject-rdata-with-DNS_RDAT.patch b/backport-0059-dns_rdata_tostruct-should-reject-rdata-with-DNS_RDAT.patch new file mode 100644 index 0000000..45f9202 --- /dev/null +++ b/backport-0059-dns_rdata_tostruct-should-reject-rdata-with-DNS_RDAT.patch @@ -0,0 +1,42 @@ +From 5374630ca4ec42eeb6cd2bd33c15eeea7dddcecf Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Thu, 10 Jun 2021 10:14:17 +1000 +Subject: [PATCH] dns_rdata_tostruct() should reject rdata with + DNS_RDATA_UPDATE set + +(cherry picked from commit e97249e01270ab43065e3a7d6ba3f5a36593a2c4) +Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/5374630ca4ec42eeb6cd2bd33c15eeea7dddcecf +--- + lib/dns/include/dns/rdata.h | 2 +- + lib/dns/rdata.c | 1 + + 2 files changed, 2 insertions(+), 1 deletion(-) + +diff --git a/lib/dns/include/dns/rdata.h b/lib/dns/include/dns/rdata.h +index 7794fb051d..804cd5b76a 100644 +--- a/lib/dns/include/dns/rdata.h ++++ b/lib/dns/include/dns/rdata.h +@@ -509,7 +509,7 @@ dns_rdata_tostruct(const dns_rdata_t *rdata, void *target, isc_mem_t *mctx); + * + * Requires: + * +- *\li 'rdata' is a valid, non-empty rdata. ++ *\li 'rdata' is a valid, non-empty, non-pseudo rdata. + * + *\li 'target' to point to a valid pointer for the type and class. 
+ * +diff --git a/lib/dns/rdata.c b/lib/dns/rdata.c +index 3bd7f54661..a255967e46 100644 +--- a/lib/dns/rdata.c ++++ b/lib/dns/rdata.c +@@ -1233,6 +1233,7 @@ dns_rdata_tostruct(const dns_rdata_t *rdata, void *target, isc_mem_t *mctx) { + + REQUIRE(rdata != NULL); + REQUIRE(DNS_RDATA_VALIDFLAGS(rdata)); ++ REQUIRE((rdata->flags & DNS_RDATA_UPDATE) == 0); + + TOSTRUCTSWITCH + +-- +2.23.0 + diff --git a/backport-0060-Update-init_count-atomically-to-silence-tsan-errors.patch b/backport-0060-Update-init_count-atomically-to-silence-tsan-errors.patch new file mode 100644 index 0000000..82f0bf5 --- /dev/null +++ b/backport-0060-Update-init_count-atomically-to-silence-tsan-errors.patch @@ -0,0 +1,103 @@ +From 90185b225f4c7acde2fbb04697d857fe496725a2 Mon Sep 17 00:00:00 2001 +From: Mark Andrews +Date: Mon, 7 Sep 2020 16:12:31 +1000 +Subject: [PATCH] Update 'init_count' atomically to silence tsan errors. + +Conflict: replace isc_atomic_xadd to count++ in seg4 +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/90185b225f4c7acde2fbb04697d857fe496725a2 +--- + lib/dns/rbtdb.c | 35 ++++++++++++++++++++++++++++++----- + 1 file changed, 30 insertions(+), 5 deletions(-) + +diff --git a/lib/dns/rbtdb.c b/lib/dns/rbtdb.c +index 21bd85c322..31ced8e73a 100644 +--- a/lib/dns/rbtdb.c ++++ b/lib/dns/rbtdb.c +@@ -399,6 +399,23 @@ typedef isc_mutex_t nodelock_t; + #define NODE_WEAKDOWNGRADE(l) ((void)0) + #endif + ++#if defined(ISC_PLATFORM_HAVESTDATOMIC) ++#if defined(__cplusplus) ++#include ++#else ++#include ++#endif ++#define DNS_RBTDB_STDATOMIC 1 ++#define DNS_RBTDB_INC(x) atomic_fetch_add(&(x), (1)) ++#define DNS_RBTDB_LOAD(x) atomic_load(&(x)) ++#elif defined(ISC_PLATFORM_HAVEXADD) ++#define DNS_RBTDB_INC(x) isc_atomic_xadd((int *)&(x), 1); ++#define DNS_RBTDB_LOAD(x) isc_atomic_xadd((int *)&(x), 0); ++#else ++#define DNS_RBTDB_INC(x) ((x)++) ++#define DNS_RBTDB_LOAD(x) (x) ++#endif ++ + /*% + * Whether to rate-limit updating the LRU to avoid possible thread contention. + * Our performance measurement has shown the cost is marginal, so it's defined +@@ -457,7 +474,11 @@ typedef struct rdatasetheader { + * this rdataset. + */ + +- uint32_t count; ++#ifdef DNS_RBTDB_STDATOMIC ++ _Atomic(uint32_t) count; ++#else ++ uint32_t count; ++#endif + /*%< + * Monotonously increased every time this rdataset is bound so that + * it is used as the base of the starting point in DNS responses +@@ -952,7 +973,11 @@ static char FILE_VERSION[32] = "\0"; + * that indicates that the database does not implement cyclic + * processing. 
+ */ ++#ifdef DNS_RBTDB_STDATOMIC ++static _Atomic(unsigned int) init_count; ++#else + static unsigned int init_count; ++#endif + + /* + * Locking +@@ -3322,7 +3347,7 @@ bind_rdataset(dns_rbtdb_t *rbtdb, dns_rbtnode_t *node, rdatasetheader_t *header, + rdataset->private2 = node; + raw = (unsigned char *)header + sizeof(*header); + rdataset->private3 = raw; +- rdataset->count = header->count++; ++ rdataset->count = DNS_RBTDB_INC(header->count); + if (rdataset->count == UINT32_MAX) + rdataset->count = 0; + +@@ -6841,7 +6866,7 @@ addrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version, + newheader->attributes |= RDATASET_ATTR_ZEROTTL; + newheader->noqname = NULL; + newheader->closest = NULL; +- newheader->count = isc_atomic_xadd((int32_t*)&init_count, 1); ++ newheader->count = DNS_RBTDB_INC(init_count); + newheader->trust = rdataset->trust; + newheader->additional_auth = NULL; + newheader->additional_glue = NULL; +@@ -7037,7 +7062,7 @@ subtractrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version, + newheader->trust = 0; + newheader->noqname = NULL; + newheader->closest = NULL; +- newheader->count = isc_atomic_xadd((int32_t*)&init_count, 1); ++ newheader->count = DNS_RBTDB_INC(init_count); + newheader->additional_auth = NULL; + newheader->additional_glue = NULL; + newheader->last_used = 0; +@@ -7483,7 +7508,7 @@ loading_addrdataset(void *arg, dns_name_t *name, dns_rdataset_t *rdataset) { + newheader->serial = 1; + newheader->noqname = NULL; + newheader->closest = NULL; +- newheader->count = isc_atomic_xadd((int32_t*)&init_count, 1); ++ newheader->count = DNS_RBTDB_INC(init_count); + newheader->additional_auth = NULL; + newheader->additional_glue = NULL; + newheader->last_used = 0; +-- +2.23.0 + diff --git a/backport-0061-Refactored-dns_message_t-for-using-attach-detach-sem.patch b/backport-0061-Refactored-dns_message_t-for-using-attach-detach-sem.patch new file mode 100644 index 0000000..17dc802 --- /dev/null +++ b/backport-0061-Refactored-dns_message_t-for-using-attach-detach-sem.patch @@ -0,0 +1,1059 @@ +From 8781aef52e6d5498d6007df3ca466d7e23641836 Mon Sep 17 00:00:00 2001 +From: Diego Fronza +Date: Mon, 21 Sep 2020 16:16:15 -0300 +Subject: [PATCH] Refactored dns_message_t for using attach/detach semantics + +This commit will be used as a base for the next code updates in order +to have a better control of dns_message_t objects' lifetime. 
+Conflict: NA +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/8781aef52e6d5498d6007df3ca466d7e23641836 +--- + bin/dig/dighost.c | 28 ++++++++--------- + bin/dig/nslookup.c | 2 +- + bin/named/client.c | 4 +-- + bin/named/update.c | 2 +- + bin/named/xfrout.c | 2 +- + bin/nsupdate/nsupdate.c | 32 +++++++++---------- + bin/tests/optional/gsstest.c | 10 +++--- + bin/tests/optional/sig0_test.c | 4 +-- + bin/tests/system/pipelined/pipequeries.c | 4 +-- + bin/tests/system/tkey/keycreate.c | 4 +-- + bin/tests/system/tkey/keydelete.c | 4 +-- + bin/tests/wire_test.c | 4 +-- + bin/tools/dnstap-read.c | 2 +- + bin/tools/mdig.c | 4 +-- + lib/dns/client.c | 14 ++++----- + lib/dns/dnstap.c | 4 +-- + lib/dns/include/dns/message.h | 25 ++++++++++----- + lib/dns/message.c | 40 ++++++++++++++++++------ + lib/dns/resolver.c | 8 ++--- + lib/dns/tests/tsig_test.c | 8 ++--- + lib/dns/win32/libdns.def.in | 3 +- + lib/dns/xfrin.c | 8 ++--- + lib/dns/zone.c | 32 +++++++++---------- + lib/samples/nsprobe.c | 4 +-- + lib/samples/sample-request.c | 6 ++-- + 25 files changed, 144 insertions(+), 114 deletions(-) + +diff --git a/bin/dig/dighost.c b/bin/dig/dighost.c +index 4ec6c13a4e..4043aa3bf1 100644 +--- a/bin/dig/dighost.c ++++ b/bin/dig/dighost.c +@@ -1907,7 +1907,7 @@ destroy_lookup(dig_lookup_t *lookup) { + isc_mem_free(mctx, ptr); + } + if (lookup->sendmsg != NULL) +- dns_message_destroy(&lookup->sendmsg); ++ dns_message_detach(&lookup->sendmsg); + if (lookup->querysig != NULL) { + debug("freeing buffer %p", lookup->querysig); + isc_buffer_free(&lookup->querysig); +@@ -4013,7 +4013,7 @@ recv_done(isc_task_t *task, isc_event_t *event) { + printf(";; Got bad packet: %s\n", isc_result_totext(result)); + hex_dump(b); + query->waiting_connect = false; +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + isc_event_free(&event); + clear_query(query); + cancel_lookup(l); +@@ -4036,7 +4036,7 @@ recv_done(isc_task_t *task, isc_event_t *event) { + printf(";; Warning: Opcode mismatch: expected %s, got %s", + expect, got); + +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + if (l->tcp_mode) { + isc_event_free(&event); + clear_query(query); +@@ -4083,7 +4083,7 @@ recv_done(isc_task_t *task, isc_event_t *event) { + } + } + if (!match) { +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + if (l->tcp_mode) { + isc_event_free(&event); + clear_query(query); +@@ -4107,7 +4107,7 @@ recv_done(isc_task_t *task, isc_event_t *event) { + n = requeue_lookup(l, true); + if (l->trace && l->trace_root) + n->rdtype = l->qrdtype; +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + isc_event_free(&event); + clear_query(query); + cancel_lookup(l); +@@ -4125,7 +4125,7 @@ recv_done(isc_task_t *task, isc_event_t *event) { + n->tcp_mode = true; + if (l->trace && l->trace_root) + n->rdtype = l->qrdtype; +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + isc_event_free(&event); + clear_query(query); + cancel_lookup(l); +@@ -4146,7 +4146,7 @@ recv_done(isc_task_t *task, isc_event_t *event) { + n->seenbadcookie = true; + if (l->trace && l->trace_root) + n->rdtype = l->qrdtype; +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + isc_event_free(&event); + clear_query(query); + cancel_lookup(l); +@@ -4185,7 +4185,7 @@ recv_done(isc_task_t *task, isc_event_t *event) { + query->servname); + clear_query(query); + check_next_lookup(l); +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + isc_event_free(&event); + UNLOCK_LOOKUP; + return; +@@ -4360,7 +4360,7 @@ recv_done(isc_task_t 
*task, isc_event_t *event) { + debug("still pending."); + if (l->doing_xfr) { + if (query != l->xfr_q) { +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + isc_event_free(&event); + query->waiting_connect = false; + UNLOCK_LOOKUP; +@@ -4369,7 +4369,7 @@ recv_done(isc_task_t *task, isc_event_t *event) { + if (!docancel) + docancel = check_for_more_data(query, msg, sevent); + if (docancel) { +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + clear_query(query); + cancel_lookup(l); + check_next_lookup(l); +@@ -4391,7 +4391,7 @@ recv_done(isc_task_t *task, isc_event_t *event) { + #ifdef DIG_SIGCHASE + if (!do_sigchase) + #endif +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + + cancel_lookup(l); + } +@@ -4404,7 +4404,7 @@ recv_done(isc_task_t *task, isc_event_t *event) { + msg = NULL; + else + #endif +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + } + isc_event_free(&event); + UNLOCK_LOOKUP; +@@ -4658,7 +4658,7 @@ destroy_libs(void) { + + while (chase_msg != NULL) { + INSIST(chase_msg->msg != NULL); +- dns_message_destroy(&(chase_msg->msg)); ++ dns_message_detach(&(chase_msg->msg)); + ptr = chase_msg; + chase_msg = ISC_LIST_NEXT(chase_msg, link); + isc_mem_free(mctx, ptr); +@@ -4668,7 +4668,7 @@ destroy_libs(void) { + + while (chase_msg != NULL) { + INSIST(chase_msg->msg != NULL); +- dns_message_destroy(&(chase_msg->msg)); ++ dns_message_detach(&(chase_msg->msg)); + ptr = chase_msg; + chase_msg = ISC_LIST_NEXT(chase_msg, link); + isc_mem_free(mctx, ptr); +diff --git a/bin/dig/nslookup.c b/bin/dig/nslookup.c +index 647ed8ce50..c35283177d 100644 +--- a/bin/dig/nslookup.c ++++ b/bin/dig/nslookup.c +@@ -983,7 +983,7 @@ flush_lookup_list(void) { + + } + if (l->sendmsg != NULL) +- dns_message_destroy(&l->sendmsg); ++ dns_message_detach(&l->sendmsg); + lp = l; + l = ISC_LIST_NEXT(l, link); + ISC_LIST_DEQUEUE(lookup_list, lp, link); +diff --git a/bin/named/client.c b/bin/named/client.c +index 95dabbf53b..4a50ad9bae 100644 +--- a/bin/named/client.c ++++ b/bin/named/client.c +@@ -758,7 +758,7 @@ exit_check(ns_client_t *client) { + client->keytag_len = 0; + } + +- dns_message_destroy(&client->message); ++ dns_message_detach(&client->message); + + /* + * Detaching the task must be done after unlinking from +@@ -3375,7 +3375,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) { + client->magic = 0; + + cleanup_message: +- dns_message_destroy(&client->message); ++ dns_message_detach(&client->message); + + cleanup_timer: + isc_timer_detach(&client->timer); +diff --git a/bin/named/update.c b/bin/named/update.c +index 82f9d4a13d..ffd2caf7e4 100644 +--- a/bin/named/update.c ++++ b/bin/named/update.c +@@ -3440,7 +3440,7 @@ forward_done(isc_task_t *task, isc_event_t *event) { + INSIST(client->nupdates > 0); + client->nupdates--; + ns_client_sendraw(client, uev->answer); +- dns_message_destroy(&uev->answer); ++ dns_message_detach(&uev->answer); + isc_event_free(&event); + ns_client_detach(&client); + } +diff --git a/bin/named/xfrout.c b/bin/named/xfrout.c +index 1e139f7a34..7149825a3c 100644 +--- a/bin/named/xfrout.c ++++ b/bin/named/xfrout.c +@@ -1580,7 +1580,7 @@ sendstream(xfrout_ctx_t *xfr) { + } + + if (tcpmsg != NULL) +- dns_message_destroy(&tcpmsg); ++ dns_message_detach(&tcpmsg); + + if (cleanup_cctx) + dns_compress_invalidate(&cctx); +diff --git a/bin/nsupdate/nsupdate.c b/bin/nsupdate/nsupdate.c +index bf5ec8fe6b..4993265f58 100644 +--- a/bin/nsupdate/nsupdate.c ++++ b/bin/nsupdate/nsupdate.c +@@ -807,7 +807,7 @@ doshutdown(void) { + } + + if 
(updatemsg != NULL) +- dns_message_destroy(&updatemsg); ++ dns_message_detach(&updatemsg); + + if (is_dst_up) { + ddebug("Destroy DST lib"); +@@ -2556,7 +2556,7 @@ recvsoa(isc_task_t *task, isc_event_t *event) { + + if (shuttingdown) { + dns_request_destroy(&request); +- dns_message_destroy(&soaquery); ++ dns_message_detach(&soaquery); + isc_mem_put(gmctx, reqinfo, sizeof(nsu_requestinfo_t)); + isc_event_free(&event); + maybeshutdown(); +@@ -2587,7 +2587,7 @@ recvsoa(isc_task_t *task, isc_event_t *event) { + result = dns_request_getresponse(request, rcvmsg, + DNS_MESSAGEPARSE_PRESERVEORDER); + if (result == DNS_R_TSIGERRORSET && servers != NULL) { +- dns_message_destroy(&rcvmsg); ++ dns_message_detach(&rcvmsg); + ddebug("Destroying request [%p]", request); + dns_request_destroy(&request); + reqinfo = isc_mem_get(gmctx, sizeof(nsu_requestinfo_t)); +@@ -2628,9 +2628,9 @@ recvsoa(isc_task_t *task, isc_event_t *event) { + dns_name_format(userzone, namebuf, sizeof(namebuf)); + error("specified zone '%s' does not exist (NXDOMAIN)", + namebuf); +- dns_message_destroy(&rcvmsg); ++ dns_message_detach(&rcvmsg); + dns_request_destroy(&request); +- dns_message_destroy(&soaquery); ++ dns_message_detach(&soaquery); + ddebug("Out of recvsoa"); + done_update(); + seenerror = true; +@@ -2760,11 +2760,11 @@ recvsoa(isc_task_t *task, isc_event_t *event) { + setzoneclass(dns_rdataclass_none); + #endif + +- dns_message_destroy(&soaquery); ++ dns_message_detach(&soaquery); + dns_request_destroy(&request); + + out: +- dns_message_destroy(&rcvmsg); ++ dns_message_detach(&rcvmsg); + ddebug("Out of recvsoa"); + return; + +@@ -2973,7 +2973,7 @@ start_gssrequest(dns_name_t *master) { + + failure: + if (rmsg != NULL) +- dns_message_destroy(&rmsg); ++ dns_message_detach(&rmsg); + if (err_message != NULL) + isc_mem_free(gmctx, err_message); + failed_gssrequest(); +@@ -3047,7 +3047,7 @@ recvgss(isc_task_t *task, isc_event_t *event) { + + if (shuttingdown) { + dns_request_destroy(&request); +- dns_message_destroy(&tsigquery); ++ dns_message_detach(&tsigquery); + isc_mem_put(gmctx, reqinfo, sizeof(nsu_gssinfo_t)); + isc_event_free(&event); + maybeshutdown(); +@@ -3058,7 +3058,7 @@ recvgss(isc_task_t *task, isc_event_t *event) { + ddebug("Destroying request [%p]", request); + dns_request_destroy(&request); + if (!next_master("recvgss", addr, eresult)) { +- dns_message_destroy(&tsigquery); ++ dns_message_detach(&tsigquery); + failed_gssrequest(); + } else { + dns_message_renderreset(tsigquery); +@@ -3117,7 +3117,7 @@ recvgss(isc_task_t *task, isc_event_t *event) { + switch (result) { + + case DNS_R_CONTINUE: +- dns_message_destroy(&rcvmsg); ++ dns_message_detach(&rcvmsg); + dns_request_destroy(&request); + send_gssrequest(kserver, tsigquery, &request, context); + ddebug("Out of recvgss"); +@@ -3165,9 +3165,9 @@ recvgss(isc_task_t *task, isc_event_t *event) { + + done: + dns_request_destroy(&request); +- dns_message_destroy(&tsigquery); ++ dns_message_detach(&tsigquery); + +- dns_message_destroy(&rcvmsg); ++ dns_message_detach(&rcvmsg); + ddebug("Out of recvgss"); + } + #endif +@@ -3186,7 +3186,7 @@ start_update(void) { + + LOCK(&answer_lock); + if (answer != NULL) { +- dns_message_destroy(&answer); ++ dns_message_detach(&answer); + } + UNLOCK(&answer_lock); + +@@ -3231,7 +3231,7 @@ start_update(void) { + dns_message_puttempname(soaquery, &name); + dns_rdataset_disassociate(rdataset); + dns_message_puttemprdataset(soaquery, &rdataset); +- dns_message_destroy(&soaquery); ++ dns_message_detach(&soaquery); + done_update(); + 
return; + } +@@ -3268,7 +3268,7 @@ cleanup(void) { + + LOCK(&answer_lock); + if (answer != NULL) { +- dns_message_destroy(&answer); ++ dns_message_detach(&answer); + } + UNLOCK(&answer_lock); + +diff --git a/bin/tests/optional/gsstest.c b/bin/tests/optional/gsstest.c +index 9692c6f42e..901c2bbb91 100644 +--- a/bin/tests/optional/gsstest.c ++++ b/bin/tests/optional/gsstest.c +@@ -157,11 +157,11 @@ recvresponse(isc_task_t *task, isc_event_t *event) { + CHECK("dns_request_getresponse", result2); + + if (response != NULL) +- dns_message_destroy(&response); ++ dns_message_detach(&response); + + end: + if (query != NULL) +- dns_message_destroy(&query); ++ dns_message_detach(&query); + + if (reqev->request != NULL) + dns_request_destroy(&reqev->request); +@@ -248,7 +248,7 @@ sendquery(isc_task_t *task, isc_event_t *event) + if (qrdataset != NULL) + dns_message_puttemprdataset(message, &qrdataset); + if (message != NULL) +- dns_message_destroy(&message); ++ dns_message_detach(&message); + } + + static void +@@ -314,11 +314,11 @@ initctx2(isc_task_t *task, isc_event_t *event) { + tsigkey = NULL; + } + +- dns_message_destroy(&response); ++ dns_message_detach(&response); + + end: + if (query != NULL) +- dns_message_destroy(&query); ++ dns_message_detach(&query); + + if (reqev->request != NULL) + dns_request_destroy(&reqev->request); +diff --git a/bin/tests/optional/sig0_test.c b/bin/tests/optional/sig0_test.c +index 4084f226e5..a74bea1ba2 100644 +--- a/bin/tests/optional/sig0_test.c ++++ b/bin/tests/optional/sig0_test.c +@@ -114,7 +114,7 @@ recvdone(isc_task_t *task, isc_event_t *event) { + printf("%.*s\n", (int)isc_buffer_usedlength(&outbuf), + (char *)isc_buffer_base(&outbuf)); + +- dns_message_destroy(&response); ++ dns_message_detach(&response); + isc_event_free(&event); + + isc_app_shutdown(); +@@ -190,7 +190,7 @@ buildquery(void) { + inr.length = sizeof(rdata); + result = isc_socket_recv(s, &inr, 1, task1, recvdone, NULL); + CHECK("isc_socket_recv", result); +- dns_message_destroy(&query); ++ dns_message_detach(&query); + } + + int +diff --git a/bin/tests/system/pipelined/pipequeries.c b/bin/tests/system/pipelined/pipequeries.c +index 63ee7e9d2f..e16ec11681 100644 +--- a/bin/tests/system/pipelined/pipequeries.c ++++ b/bin/tests/system/pipelined/pipequeries.c +@@ -117,8 +117,8 @@ recvresponse(isc_task_t *task, isc_event_t *event) { + (char *)isc_buffer_base(&outbuf)); + fflush(stdout); + +- dns_message_destroy(&query); +- dns_message_destroy(&response); ++ dns_message_detach(&query); ++ dns_message_detach(&response); + dns_request_destroy(&reqev->request); + isc_event_free(&event); + +diff --git a/bin/tests/system/tkey/keycreate.c b/bin/tests/system/tkey/keycreate.c +index 0c0b08a90b..85b497f280 100644 +--- a/bin/tests/system/tkey/keycreate.c ++++ b/bin/tests/system/tkey/keycreate.c +@@ -117,8 +117,8 @@ recvquery(isc_task_t *task, isc_event_t *event) { + result = dst_key_tofile(tsigkey->key, type, ""); + CHECK("dst_key_tofile", result); + +- dns_message_destroy(&query); +- dns_message_destroy(&response); ++ dns_message_detach(&query); ++ dns_message_detach(&response); + dns_request_destroy(&reqev->request); + isc_event_free(&event); + isc_app_shutdown(); +diff --git a/bin/tests/system/tkey/keydelete.c b/bin/tests/system/tkey/keydelete.c +index ae26675755..ea7a09b014 100644 +--- a/bin/tests/system/tkey/keydelete.c ++++ b/bin/tests/system/tkey/keydelete.c +@@ -96,8 +96,8 @@ recvquery(isc_task_t *task, isc_event_t *event) { + result = dns_tkey_processdeleteresponse(query, response, ring); + 
CHECK("dns_tkey_processdhresponse", result); + +- dns_message_destroy(&query); +- dns_message_destroy(&response); ++ dns_message_detach(&query); ++ dns_message_detach(&response); + dns_request_destroy(&reqev->request); + isc_event_free(&event); + isc_app_shutdown(); +diff --git a/bin/tests/wire_test.c b/bin/tests/wire_test.c +index 0d001a2f77..7fb8446375 100644 +--- a/bin/tests/wire_test.c ++++ b/bin/tests/wire_test.c +@@ -328,7 +328,7 @@ process_message(isc_buffer_t *source) { + dns_compress_invalidate(&cctx); + + message->from_to_wire = DNS_MESSAGE_INTENTPARSE; +- dns_message_destroy(&message); ++ dns_message_detach(&message); + + printf("Message rendered.\n"); + if (printmemstats) +@@ -344,5 +344,5 @@ process_message(isc_buffer_t *source) { + result = printmessage(message); + CHECKRESULT(result, "printmessage() failed"); + } +- dns_message_destroy(&message); ++ dns_message_detach(&message); + } +diff --git a/bin/tools/dnstap-read.c b/bin/tools/dnstap-read.c +index 97c2f71346..cf0c884c02 100644 +--- a/bin/tools/dnstap-read.c ++++ b/bin/tools/dnstap-read.c +@@ -362,7 +362,7 @@ main(int argc, char *argv[]) { + if (handle != NULL) + dns_dt_close(&handle); + if (message != NULL) +- dns_message_destroy(&message); ++ dns_message_detach(&message); + if (b != NULL) + isc_buffer_free(&b); + isc_mem_destroy(&mctx); +diff --git a/bin/tools/mdig.c b/bin/tools/mdig.c +index 9a105cbd83..4dd12ee6f4 100644 +--- a/bin/tools/mdig.c ++++ b/bin/tools/mdig.c +@@ -478,9 +478,9 @@ cleanup: + if (style != NULL) + dns_master_styledestroy(&style, mctx); + if (query != NULL) +- dns_message_destroy(&query); ++ dns_message_detach(&query); + if (response != NULL) +- dns_message_destroy(&response); ++ dns_message_detach(&response); + dns_request_destroy(&reqev->request); + isc_event_free(&event); + +diff --git a/lib/dns/client.c b/lib/dns/client.c +index 6e2b1fe348..c2dca94bad 100644 +--- a/lib/dns/client.c ++++ b/lib/dns/client.c +@@ -1961,7 +1961,7 @@ static void + update_sendevent(updatectx_t *uctx, isc_result_t result) { + isc_task_t *task; + +- dns_message_destroy(&uctx->updatemsg); ++ dns_message_detach(&uctx->updatemsg); + if (uctx->tsigkey != NULL) + dns_tsigkey_detach(&uctx->tsigkey); + if (uctx->sig0key != NULL) +@@ -2012,7 +2012,7 @@ update_done(isc_task_t *task, isc_event_t *event) { + + out: + if (answer != NULL) +- dns_message_destroy(&answer); ++ dns_message_detach(&answer); + isc_event_free(&event); + + LOCK(&uctx->lock); +@@ -2354,7 +2354,7 @@ receive_soa(isc_task_t *task, isc_event_t *event) { + dns_request_t *newrequest = NULL; + + /* Retry SOA request without TSIG */ +- dns_message_destroy(&rcvmsg); ++ dns_message_detach(&rcvmsg); + dns_message_renderreset(uctx->soaquery); + reqoptions = 0; + if (uctx->want_tcp) +@@ -2477,14 +2477,14 @@ receive_soa(isc_task_t *task, isc_event_t *event) { + } + + if (!droplabel || result != ISC_R_SUCCESS) { +- dns_message_destroy(&uctx->soaquery); ++ dns_message_detach(&uctx->soaquery); + LOCK(&uctx->lock); + dns_request_destroy(&uctx->soareq); + UNLOCK(&uctx->lock); + } + + if (rcvmsg != NULL) +- dns_message_destroy(&rcvmsg); ++ dns_message_detach(&rcvmsg); + + if (result != ISC_R_SUCCESS) + update_sendevent(uctx, result); +@@ -2541,7 +2541,7 @@ request_soa(updatectx_t *uctx) { + } + if (name != NULL) + dns_message_puttempname(soaquery, &name); +- dns_message_destroy(&soaquery); ++ dns_message_detach(&soaquery); + + return (result); + } +@@ -3040,7 +3040,7 @@ dns_client_startupdate(dns_client_t *client, dns_rdataclass_t rdclass, + UNLOCK(&client->lock); + } + if 
(uctx->updatemsg != NULL) +- dns_message_destroy(&uctx->updatemsg); ++ dns_message_detach(&uctx->updatemsg); + while ((sa = ISC_LIST_HEAD(uctx->servers)) != NULL) { + ISC_LIST_UNLINK(uctx->servers, sa, link); + isc_mem_put(client->mctx, sa, sizeof(*sa)); +diff --git a/lib/dns/dnstap.c b/lib/dns/dnstap.c +index 4b289a9ea7..fad1e3b040 100644 +--- a/lib/dns/dnstap.c ++++ b/lib/dns/dnstap.c +@@ -1041,7 +1041,7 @@ dns_dt_parse(isc_mem_t *mctx, isc_region_t *src, dns_dtdata_t **destp) { + result = dns_message_parse(d->msg, &b, 0); + if (result != ISC_R_SUCCESS) { + if (result != DNS_R_RECOVERABLE) +- dns_message_destroy(&d->msg); ++ dns_message_detach(&d->msg); + result = ISC_R_SUCCESS; + } + +@@ -1248,7 +1248,7 @@ dns_dtdata_free(dns_dtdata_t **dp) { + d = *dp; + + if (d->msg != NULL) +- dns_message_destroy(&d->msg); ++ dns_message_detach(&d->msg); + if (d->frame != NULL) + dnstap__dnstap__free_unpacked(d->frame, NULL); + +diff --git a/lib/dns/include/dns/message.h b/lib/dns/include/dns/message.h +index 58de67a8af..f64522b43a 100644 +--- a/lib/dns/include/dns/message.h ++++ b/lib/dns/include/dns/message.h +@@ -21,6 +21,7 @@ + + #include + #include ++#include + + #include + #include +@@ -194,6 +195,7 @@ typedef struct dns_msgblock dns_msgblock_t; + struct dns_message { + /* public from here down */ + unsigned int magic; ++ isc_refcount_t refcount; + + dns_messageid_t id; + unsigned int flags; +@@ -310,8 +312,8 @@ dns_message_reset(dns_message_t *msg, unsigned int intent); + /*%< + * Reset a message structure to default state. All internal lists are freed + * or reset to a default state as well. This is simply a more efficient +- * way to call dns_message_destroy() followed by dns_message_allocate(), +- * since it avoid many memory allocations. ++ * way to call dns_message_detach() (assuming last reference is hold), ++ * followed by dns_message_create(), since it avoid many memory allocations. + * + * If any data loanouts (buffers, names, rdatas, etc) were requested, + * the caller must no longer use them after this call. +@@ -326,16 +328,23 @@ dns_message_reset(dns_message_t *msg, unsigned int intent); + */ + + void +-dns_message_destroy(dns_message_t **msgp); ++dns_message_attach(dns_message_t *source, dns_message_t **target); + /*%< +- * Destroy all state in the message. ++ * Attach to message 'source'. + * + * Requires: ++ *\li 'source' to be a valid message. ++ *\li 'target' to be non NULL and '*target' to be NULL. ++ */ ++ ++void ++dns_message_detach(dns_message_t **messagep); ++/*%< ++ * Detach *messagep from its message. ++ * list. + * +- *\li 'msgp' be valid. +- * +- * Ensures: +- *\li '*msgp' == NULL ++ * Requires: ++ *\li '*messagep' to be a valid message. + */ + + isc_result_t +diff --git a/lib/dns/message.c b/lib/dns/message.c +index 6a6d09e219..2812ab5a37 100644 +--- a/lib/dns/message.c ++++ b/lib/dns/message.c +@@ -23,6 +23,7 @@ + #include + #include + #include ++#include + #include /* Required for HP/UX (and others?) */ + #include + #include +@@ -554,7 +555,7 @@ msgresetsigs(dns_message_t *msg, bool replying) { + + /* + * Free all but one (or everything) for this message. This is used by +- * both dns_message_reset() and dns_message_destroy(). ++ * both dns_message_reset() and dns__message_destroy(). 
+ */ + static void + msgreset(dns_message_t *msg, bool everything) { +@@ -794,6 +795,8 @@ dns_message_create(isc_mem_t *mctx, unsigned int intent, dns_message_t **msgp) + + m->cctx = NULL; + ++ isc_refcount_init(&m->refcount, 1); ++ + *msgp = m; + return (ISC_R_SUCCESS); + +@@ -826,23 +829,40 @@ dns_message_reset(dns_message_t *msg, unsigned int intent) { + msg->from_to_wire = intent; + } + +-void +-dns_message_destroy(dns_message_t **msgp) { +- dns_message_t *msg; +- +- REQUIRE(msgp != NULL); +- REQUIRE(DNS_MESSAGE_VALID(*msgp)); +- +- msg = *msgp; +- *msgp = NULL; ++static void ++dns__message_destroy(dns_message_t *msg) { ++ REQUIRE(msg != NULL); ++ REQUIRE(DNS_MESSAGE_VALID(msg)); + + msgreset(msg, true); + isc_mempool_destroy(&msg->namepool); + isc_mempool_destroy(&msg->rdspool); ++ isc_refcount_destroy(&msg->refcount); + msg->magic = 0; + isc_mem_putanddetach(&msg->mctx, msg, sizeof(dns_message_t)); + } + ++void ++dns_message_attach(dns_message_t *source, dns_message_t **target) { ++ REQUIRE(DNS_MESSAGE_VALID(source)); ++ ++ isc_refcount_increment(&source->refcount, NULL); ++ *target = source; ++} ++ ++void ++dns_message_detach(dns_message_t **messagep) { ++ REQUIRE(messagep != NULL && DNS_MESSAGE_VALID(*messagep)); ++ dns_message_t *msg = *messagep; ++ *messagep = NULL; ++ int32_t refs; ++ ++ isc_refcount_decrement(&msg->refcount, &refs); ++ if (refs == 0) { ++ dns__message_destroy(msg); ++ } ++} ++ + static isc_result_t + findname(dns_name_t **foundname, dns_name_t *target, + dns_namelist_t *section) +diff --git a/lib/dns/resolver.c b/lib/dns/resolver.c +index e67393aba9..daf9a277ec 100644 +--- a/lib/dns/resolver.c ++++ b/lib/dns/resolver.c +@@ -3966,8 +3966,8 @@ fctx_destroy(fetchctx_t *fctx) { + isc_counter_detach(&fctx->qc); + fcount_decr(fctx); + isc_timer_detach(&fctx->timer); +- dns_message_destroy(&fctx->rmessage); +- dns_message_destroy(&fctx->qmessage); ++ dns_message_detach(&fctx->rmessage); ++ dns_message_detach(&fctx->qmessage); + if (dns_name_countlabels(&fctx->domain) > 0) + dns_name_free(&fctx->domain, fctx->mctx); + if (dns_rdataset_isassociated(&fctx->nameservers)) +@@ -4578,10 +4578,10 @@ fctx_create(dns_resolver_t *res, dns_name_t *name, dns_rdatatype_t type, + return (ISC_R_SUCCESS); + + cleanup_rmessage: +- dns_message_destroy(&fctx->rmessage); ++ dns_message_detach(&fctx->rmessage); + + cleanup_qmessage: +- dns_message_destroy(&fctx->qmessage); ++ dns_message_detach(&fctx->qmessage); + + cleanup_fcount: + fcount_decr(fctx); +diff --git a/lib/dns/tests/tsig_test.c b/lib/dns/tests/tsig_test.c +index 112a0467f8..ff0c99f7f6 100644 +--- a/lib/dns/tests/tsig_test.c ++++ b/lib/dns/tests/tsig_test.c +@@ -261,7 +261,7 @@ render(isc_buffer_t *buf, unsigned flags, dns_tsigkey_t *key, + } + + dns_compress_invalidate(&cctx); +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + } + + /* +@@ -354,7 +354,7 @@ tsig_tcp_test(void **state) { + tsigctx = msg->tsigctx; + msg->tsigctx = NULL; + isc_buffer_free(&buf); +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + + result = dst_context_create3(key->key, mctx, DNS_LOGCATEGORY_DNSSEC, + false, &outctx); +@@ -412,7 +412,7 @@ tsig_tcp_test(void **state) { + tsigctx = msg->tsigctx; + msg->tsigctx = NULL; + isc_buffer_free(&buf); +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + + /* + * Create response message 3. 
+@@ -464,7 +464,7 @@ tsig_tcp_test(void **state) { + assert_int_equal(result, ISC_R_SUCCESS); + + isc_buffer_free(&buf); +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + + if (outctx != NULL) { + dst_context_destroy(&outctx); +diff --git a/lib/dns/win32/libdns.def.in b/lib/dns/win32/libdns.def.in +index b426f150dd..9c2ef79479 100644 +--- a/lib/dns/win32/libdns.def.in ++++ b/lib/dns/win32/libdns.def.in +@@ -514,11 +514,12 @@ dns_master_stylecreate2 + dns_master_styledestroy + dns_master_styleflags + dns_message_addname ++dns_message_attach + dns_message_buildopt + dns_message_checksig + dns_message_create + dns_message_currentname +-dns_message_destroy ++dns_message_detach + dns_message_find + dns_message_findname + dns_message_findtype +diff --git a/lib/dns/xfrin.c b/lib/dns/xfrin.c +index 1419d523b0..3a3f407289 100644 +--- a/lib/dns/xfrin.c ++++ b/lib/dns/xfrin.c +@@ -1198,7 +1198,7 @@ xfrin_send_request(dns_xfrin_ctx_t *xfr) { + if (qrdataset != NULL) + dns_message_puttemprdataset(msg, &qrdataset); + if (msg != NULL) +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + if (soatuple != NULL) + dns_difftuple_free(&soatuple); + if (ver != NULL) +@@ -1307,7 +1307,7 @@ xfrin_recv_done(isc_task_t *task, isc_event_t *ev) { + xfrin_log(xfr, ISC_LOG_DEBUG(3), "got %s, retrying with AXFR", + isc_result_totext(result)); + try_axfr: +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + xfrin_reset(xfr); + xfr->reqtype = dns_rdatatype_soa; + xfr->state = XFRST_SOAQUERY; +@@ -1419,7 +1419,7 @@ xfrin_recv_done(isc_task_t *task, isc_event_t *ev) { + xfr->tsigctx = msg->tsigctx; + msg->tsigctx = NULL; + +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + + switch (xfr->state) { + case XFRST_GOTSOA: +@@ -1464,7 +1464,7 @@ xfrin_recv_done(isc_task_t *task, isc_event_t *ev) { + + failure: + if (msg != NULL) +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + if (result != ISC_R_SUCCESS) + xfrin_fail(xfr, result, "failed while receiving responses"); + } +diff --git a/lib/dns/zone.c b/lib/dns/zone.c +index fd634af65f..6f40ebd636 100644 +--- a/lib/dns/zone.c ++++ b/lib/dns/zone.c +@@ -11338,7 +11338,7 @@ notify_send_toaddr(isc_task_t *task, isc_event_t *event) { + if (key != NULL) + dns_tsigkey_detach(&key); + cleanup_message: +- dns_message_destroy(&message); ++ dns_message_detach(&message); + cleanup: + UNLOCK_ZONE(notify->zone); + isc_event_free(&event); +@@ -11887,7 +11887,7 @@ stub_callback(isc_task_t *task, isc_event_t *event) { + ZONEDB_UNLOCK(&zone->dblock, isc_rwlocktype_write); + dns_db_detach(&stub->db); + +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + isc_event_free(&event); + dns_request_destroy(&zone->request); + +@@ -11909,7 +11909,7 @@ stub_callback(isc_task_t *task, isc_event_t *event) { + if (stub->db != NULL) + dns_db_detach(&stub->db); + if (msg != NULL) +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + isc_event_free(&event); + dns_request_destroy(&zone->request); + /* +@@ -11956,7 +11956,7 @@ stub_callback(isc_task_t *task, isc_event_t *event) { + + same_master: + if (msg != NULL) +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + isc_event_free(&event); + dns_request_destroy(&zone->request); + ns_query(zone, NULL, stub); +@@ -12352,7 +12352,7 @@ refresh_callback(isc_task_t *task, isc_event_t *event) { + ns_query(zone, rdataset, NULL); + } + if (msg != NULL) +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + } else if (isc_serial_eq(soa.serial, oldserial)) { + isc_time_t expiretime; + uint32_t 
expire; +@@ -12387,12 +12387,12 @@ refresh_callback(isc_task_t *task, isc_event_t *event) { + goto next_master; + } + if (msg != NULL) +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + goto detach; + + next_master: + if (msg != NULL) +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + isc_event_free(&event); + dns_request_destroy(&zone->request); + /* +@@ -12444,7 +12444,7 @@ refresh_callback(isc_task_t *task, isc_event_t *event) { + + same_master: + if (msg != NULL) +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + isc_event_free(&event); + dns_request_destroy(&zone->request); + queue_soa_query(zone); +@@ -12541,7 +12541,7 @@ create_query(dns_zone_t *zone, dns_rdatatype_t rdtype, + if (qrdataset != NULL) + dns_message_puttemprdataset(message, &qrdataset); + if (message != NULL) +- dns_message_destroy(&message); ++ dns_message_detach(&message); + return (result); + } + +@@ -12751,7 +12751,7 @@ soa_query(isc_task_t *task, isc_event_t *event) { + if (result != ISC_R_SUCCESS) + DNS_ZONE_CLRFLAG(zone, DNS_ZONEFLG_REFRESH); + if (message != NULL) +- dns_message_destroy(&message); ++ dns_message_detach(&message); + if (cancel) + cancel_refresh(zone); + isc_event_free(&event); +@@ -12762,7 +12762,7 @@ soa_query(isc_task_t *task, isc_event_t *event) { + skip_master: + if (key != NULL) + dns_tsigkey_detach(&key); +- dns_message_destroy(&message); ++ dns_message_detach(&message); + /* + * Skip to next failed / untried master. + */ +@@ -12983,7 +12983,7 @@ ns_query(dns_zone_t *zone, dns_rdataset_t *soardataset, dns_stub_t *stub) { + dns_result_totext(result)); + goto cleanup; + } +- dns_message_destroy(&message); ++ dns_message_detach(&message); + goto unlock; + + cleanup: +@@ -13000,7 +13000,7 @@ ns_query(dns_zone_t *zone, dns_rdataset_t *soardataset, dns_stub_t *stub) { + isc_mem_put(stub->mctx, stub, sizeof(*stub)); + } + if (message != NULL) +- dns_message_destroy(&message); ++ dns_message_detach(&message); + unlock: + if (key != NULL) + dns_tsigkey_detach(&key); +@@ -13436,7 +13436,7 @@ notify_createmessage(dns_zone_t *zone, unsigned int flags, + dns_message_puttempname(message, &tempname); + if (temprdataset != NULL) + dns_message_puttemprdataset(message, &temprdataset); +- dns_message_destroy(&message); ++ dns_message_detach(&message); + return (result); + } + +@@ -14251,7 +14251,7 @@ notify_done(isc_task_t *task, isc_event_t *event) { + notify_destroy(notify, false); + } + if (message != NULL) +- dns_message_destroy(&message); ++ dns_message_detach(&message); + } + + struct secure_event { +@@ -16247,7 +16247,7 @@ forward_callback(isc_task_t *task, isc_event_t *event) { + + next_master: + if (msg != NULL) +- dns_message_destroy(&msg); ++ dns_message_detach(&msg); + isc_event_free(&event); + forward->which++; + dns_request_destroy(&forward->request); +diff --git a/lib/samples/nsprobe.c b/lib/samples/nsprobe.c +index f5cc6d6feb..9bf0e65e01 100644 +--- a/lib/samples/nsprobe.c ++++ b/lib/samples/nsprobe.c +@@ -1203,8 +1203,8 @@ main(int argc, char *argv[]) { + + /* Cleanup */ + for (i = 0; i < MAX_PROBES; i++) { +- dns_message_destroy(&probes[i].qmessage); +- dns_message_destroy(&probes[i].rmessage); ++ dns_message_detach(&probes[i].qmessage); ++ dns_message_detach(&probes[i].rmessage); + } + isc_task_detach(&probe_task); + dns_client_destroy(&client); +diff --git a/lib/samples/sample-request.c b/lib/samples/sample-request.c +index bfd806d28e..3a4123e244 100644 +--- a/lib/samples/sample-request.c ++++ b/lib/samples/sample-request.c +@@ -115,7 +115,7 @@ 
make_querymessage(dns_message_t *message, const char *namestr, + dns_message_puttempname(message, &qname); + if (qrdataset != NULL) + dns_message_puttemprdataset(message, &qrdataset); +- dns_message_destroy(&message); ++ dns_message_detach(&message); + return (result); + } + +@@ -255,8 +255,8 @@ main(int argc, char *argv[]) { + isc_buffer_free(&outputbuf); + + /* Cleanup */ +- dns_message_destroy(&qmessage); +- dns_message_destroy(&rmessage); ++ dns_message_detach(&qmessage); ++ dns_message_detach(&rmessage); + isc_mem_destroy(&mctx); + dns_client_destroy(&client); + dns_lib_shutdown(); +-- +2.23.0 + diff --git a/backport-0062-Fix-invalid-dns-message-state-in-resolver-s-logic.patch b/backport-0062-Fix-invalid-dns-message-state-in-resolver-s-logic.patch new file mode 100644 index 0000000..f43c7d7 --- /dev/null +++ b/backport-0062-Fix-invalid-dns-message-state-in-resolver-s-logic.patch @@ -0,0 +1,979 @@ +From d391a0b4c5ee02b16207ba64d0a61b5774694a62 Mon Sep 17 00:00:00 2001 +From: Diego Fronza +Date: Mon, 21 Sep 2020 17:32:39 -0300 +Subject: [PATCH] Fix invalid dns message state in resolver's logic + +The assertion failure REQUIRE(msg->state == DNS_SECTION_ANY), +caused by calling dns_message_setclass within function resquery_response() +in resolver.c, was happening due to wrong management of dns message_t +objects used to process responses to the queries issued by the resolver. + +Before the fix, a resolver's fetch context (fetchctx_t) would hold +a pointer to the message, this same reference would then be used over all +the attempts to resolve the query, trying next server, etc... for this to work +the message object would have it's state reset between each iteration, marking +it as ready for a new processing. + +The problem arose in a scenario with many different forwarders configured, +managing the state of the dns_message_t object was lacking better +synchronization, which have led it to a invalid dns_message_t state in +resquery_response(). + +Instead of adding unnecessarily complex code to synchronize the object, +the dns_message_t object was moved from fetchctx_t structure to the +query structure, where it better belongs to, since each query will produce +a response, this way whenever a new query is created an associated +dns_messate_t is also created. + +This commit deals mainly with moving the dns_message_t object from fetchctx_t +to the query structure. +Conflict: adapt is_lame function +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/d391a0b4c5ee02b16207ba64d0a61b5774694a62 + +--- + lib/dns/resolver.c | 285 +++++++++++++++++++++++++++------------------ + 1 file changed, 171 insertions(+), 114 deletions(-) + +diff --git a/lib/dns/resolver.c b/lib/dns/resolver.c +index b02742a..97b02e2 100644 +--- a/lib/dns/resolver.c ++++ b/lib/dns/resolver.c +@@ -208,6 +208,7 @@ typedef struct query { + /* Locked by task event serialization. 
*/ + unsigned int magic; + fetchctx_t * fctx; ++ dns_message_t * rmessage; + isc_mem_t * mctx; + dns_dispatchmgr_t * dispatchmgr; + dns_dispatch_t * dispatch; +@@ -291,7 +292,6 @@ struct fetchctx { + isc_time_t expires; + isc_interval_t interval; + dns_message_t * qmessage; +- dns_message_t * rmessage; + ISC_LIST(resquery_t) queries; + dns_adbfindlist_t finds; + dns_adbfind_t * find; +@@ -413,8 +413,14 @@ struct fetchctx { + typedef struct { + dns_adbaddrinfo_t * addrinfo; + fetchctx_t * fctx; ++ dns_message_t * rmessage; + } dns_valarg_t; + ++typedef struct { ++ fetchctx_t * fctx; ++ dns_message_t * rmessage; ++} dns_chkarg_t; ++ + struct dns_fetch { + unsigned int magic; + isc_mem_t * mctx; +@@ -586,9 +592,12 @@ static isc_result_t ncache_adderesult(dns_message_t *message, + isc_result_t *eresultp); + static void validated(isc_task_t *task, isc_event_t *event); + static bool maybe_destroy(fetchctx_t *fctx, bool locked); +-static void add_bad(fetchctx_t *fctx, dns_adbaddrinfo_t *addrinfo, +- isc_result_t reason, badnstype_t badtype); +-static inline isc_result_t findnoqname(fetchctx_t *fctx, dns_name_t *name, ++static void add_bad(fetchctx_t *fctx, dns_message_t *rmessage, ++ dns_adbaddrinfo_t *addrinfo, isc_result_t reason, ++ badnstype_t badtype); ++static inline isc_result_t findnoqname(fetchctx_t *fctx, ++ dns_message_t *rmessage, ++ dns_name_t *name, + dns_rdatatype_t type, + dns_name_t **noqname); + static void fctx_increference(fetchctx_t *fctx); +@@ -610,7 +619,8 @@ dec_stats(dns_resolver_t *res, isc_statscounter_t counter) { + } + + static isc_result_t +-valcreate(fetchctx_t *fctx, dns_adbaddrinfo_t *addrinfo, dns_name_t *name, ++valcreate(fetchctx_t *fctx, dns_message_t *rmessage, ++ dns_adbaddrinfo_t *addrinfo, dns_name_t *name, + dns_rdatatype_t type, dns_rdataset_t *rdataset, + dns_rdataset_t *sigrdataset, unsigned int valoptions, + isc_task_t *task) +@@ -625,6 +635,7 @@ valcreate(fetchctx_t *fctx, dns_adbaddrinfo_t *addrinfo, dns_name_t *name, + + valarg->fctx = fctx; + valarg->addrinfo = addrinfo; ++ valarg->rmessage = rmessage; + + if (!ISC_LIST_EMPTY(fctx->validators)) + valoptions |= DNS_VALIDATOR_DEFER; +@@ -632,7 +643,7 @@ valcreate(fetchctx_t *fctx, dns_adbaddrinfo_t *addrinfo, dns_name_t *name, + valoptions &= ~DNS_VALIDATOR_DEFER; + + result = dns_validator_create(fctx->res->view, name, type, rdataset, +- sigrdataset, fctx->rmessage, ++ sigrdataset, rmessage, + valoptions, task, validated, valarg, + &validator); + if (result == ISC_R_SUCCESS) { +@@ -899,6 +910,8 @@ resquery_destroy(resquery_t **queryp) { + empty = fctx_decreference(query->fctx); + UNLOCK(&res->buckets[bucket].lock); + ++ dns_message_detach(&query->rmessage); ++ + query->magic = 0; + isc_mem_put(query->mctx, query, sizeof(*query)); + *queryp = NULL; +@@ -996,7 +1009,8 @@ fctx_cancelquery(resquery_t **queryp, dns_dispatchevent_t **deventp, + if (fctx->fwdpolicy == dns_fwdpolicy_first && + ISFORWARDER(query->addrinfo)) + { +- add_bad(fctx, query->addrinfo, ISC_R_TIMEDOUT, ++ add_bad(fctx, query->rmessage, ++ query->addrinfo, ISC_R_TIMEDOUT, + badns_forwarder); + } + +@@ -1540,7 +1554,8 @@ process_sendevent(resquery_t *query, isc_event_t *event) { + /* + * No route to remote. 
+ */ +- add_bad(fctx, query->addrinfo, sevent->result, ++ add_bad(fctx, query->rmessage, query->addrinfo, ++ sevent->result, + badns_unreachable); + fctx_cancelquery(&query, NULL, NULL, true, + false); +@@ -1718,13 +1733,17 @@ fctx_query(fetchctx_t *fctx, dns_adbaddrinfo_t *addrinfo, + + INSIST(ISC_LIST_EMPTY(fctx->validators)); + +- dns_message_reset(fctx->rmessage, DNS_MESSAGE_INTENTPARSE); +- + query = isc_mem_get(fctx->mctx, sizeof(*query)); + if (query == NULL) { + result = ISC_R_NOMEMORY; + goto stop_idle_timer; + } ++ query->rmessage = NULL; ++ result = dns_message_create(fctx->mctx, DNS_MESSAGE_INTENTPARSE, ++ &query->rmessage); ++ if (result != ISC_R_SUCCESS) { ++ goto cleanup_query; ++ } + query->mctx = fctx->mctx; + query->options = options; + query->attributes = 0; +@@ -1792,7 +1811,7 @@ fctx_query(fetchctx_t *fctx, dns_adbaddrinfo_t *addrinfo, + break; + } + if (result != ISC_R_SUCCESS) +- goto cleanup_query; ++ goto cleanup_rmessage; + } + isc_sockaddr_setport(&addr, 0); + if (query->dscp == -1) +@@ -1802,7 +1821,7 @@ fctx_query(fetchctx_t *fctx, dns_adbaddrinfo_t *addrinfo, + isc_sockettype_tcp, + &query->tcpsocket); + if (result != ISC_R_SUCCESS) +- goto cleanup_query; ++ goto cleanup_rmessage; + + #ifndef BROKEN_TCP_BIND_BEFORE_CONNECT + result = isc_socket_bind(query->tcpsocket, &addr, 0); +@@ -1933,6 +1952,9 @@ fctx_query(fetchctx_t *fctx, dns_adbaddrinfo_t *addrinfo, + if (query->dispatch != NULL) + dns_dispatch_detach(&query->dispatch); + ++ cleanup_rmessage: ++ dns_message_detach(&query->rmessage); ++ + cleanup_query: + if (query->connects == 0) { + query->magic = 0; +@@ -2783,7 +2805,8 @@ resquery_connected(isc_task_t *task, isc_event_t *event) { + * exceeds 512 bytes from broken servers. + */ + if ((query->options & DNS_FETCHOPT_EDNS512) != 0) { +- add_bad(fctx, query->addrinfo, sevent->result, ++ add_bad(fctx, query->rmessage, ++ query->addrinfo, sevent->result, + badns_unreachable); + } + fctx_cancelquery(&query, NULL, NULL, true, false); +@@ -2976,8 +2999,8 @@ mark_bad(fetchctx_t *fctx) { + } + + static void +-add_bad(fetchctx_t *fctx, dns_adbaddrinfo_t *addrinfo, isc_result_t reason, +- badnstype_t badtype) ++add_bad(fetchctx_t *fctx, dns_message_t *rmessage, dns_adbaddrinfo_t *addrinfo, ++ isc_result_t reason, badnstype_t badtype) + { + char namebuf[DNS_NAME_FORMATSIZE]; + char addrbuf[ISC_SOCKADDR_FORMATSIZE]; +@@ -3034,18 +3057,18 @@ add_bad(fetchctx_t *fctx, dns_adbaddrinfo_t *addrinfo, isc_result_t reason, + return; + + if (reason == DNS_R_UNEXPECTEDRCODE && +- fctx->rmessage->rcode == dns_rcode_servfail && ++ rmessage->rcode == dns_rcode_servfail && + ISFORWARDER(addrinfo)) + return; + + if (reason == DNS_R_UNEXPECTEDRCODE) { + isc_buffer_init(&b, code, sizeof(code) - 1); +- dns_rcode_totext(fctx->rmessage->rcode, &b); ++ dns_rcode_totext(rmessage->rcode, &b); + code[isc_buffer_usedlength(&b)] = '\0'; + spc = " "; + } else if (reason == DNS_R_UNEXPECTEDOPCODE) { + isc_buffer_init(&b, code, sizeof(code) - 1); +- dns_opcode_totext((dns_opcode_t)fctx->rmessage->opcode, &b); ++ dns_opcode_totext((dns_opcode_t)rmessage->opcode, &b); + code[isc_buffer_usedlength(&b)] = '\0'; + spc = " "; + } else { +@@ -3970,7 +3993,6 @@ fctx_destroy(fetchctx_t *fctx) { + isc_counter_detach(&fctx->qc); + fcount_decr(fctx); + isc_timer_detach(&fctx->timer); +- dns_message_detach(&fctx->rmessage); + dns_message_detach(&fctx->qmessage); + if (dns_name_countlabels(&fctx->domain) > 0) + dns_name_free(&fctx->domain, fctx->mctx); +@@ -4512,13 +4534,6 @@ fctx_create(dns_resolver_t *res, 
dns_name_t *name, dns_rdatatype_t type, + if (result != ISC_R_SUCCESS) + goto cleanup_fcount; + +- fctx->rmessage = NULL; +- result = dns_message_create(mctx, DNS_MESSAGE_INTENTPARSE, +- &fctx->rmessage); +- +- if (result != ISC_R_SUCCESS) +- goto cleanup_qmessage; +- + /* + * Compute an expiration time for the entire fetch. + */ +@@ -4530,7 +4545,7 @@ fctx_create(dns_resolver_t *res, dns_name_t *name, dns_rdatatype_t type, + "isc_time_nowplusinterval: %s", + isc_result_totext(iresult)); + result = ISC_R_UNEXPECTED; +- goto cleanup_rmessage; ++ goto cleanup_qmessage; + } + + /* +@@ -4554,7 +4569,7 @@ fctx_create(dns_resolver_t *res, dns_name_t *name, dns_rdatatype_t type, + "isc_timer_create: %s", + isc_result_totext(iresult)); + result = ISC_R_UNEXPECTED; +- goto cleanup_rmessage; ++ goto cleanup_qmessage; + } + + /* +@@ -4582,9 +4597,6 @@ fctx_create(dns_resolver_t *res, dns_name_t *name, dns_rdatatype_t type, + + return (ISC_R_SUCCESS); + +- cleanup_rmessage: +- dns_message_detach(&fctx->rmessage); +- + cleanup_qmessage: + dns_message_detach(&fctx->qmessage); + +@@ -4618,8 +4630,7 @@ fctx_create(dns_resolver_t *res, dns_name_t *name, dns_rdatatype_t type, + * Handle Responses + */ + static inline bool +-is_lame(fetchctx_t *fctx) { +- dns_message_t *message = fctx->rmessage; ++is_lame(fetchctx_t *fctx, dns_message_t *message) { + dns_name_t *name; + dns_rdataset_t *rdataset; + isc_result_t result; +@@ -4696,9 +4707,8 @@ log_formerr(fetchctx_t *fctx, const char *format, ...) { + } + + static isc_result_t +-same_question(fetchctx_t *fctx) { ++same_question(fetchctx_t *fctx, dns_message_t *message) { + isc_result_t result; +- dns_message_t *message = fctx->rmessage; + dns_name_t *name; + dns_rdataset_t *rdataset; + +@@ -4892,6 +4902,7 @@ validated(isc_task_t *task, isc_event_t *event) { + uint32_t ttl; + unsigned options; + uint32_t bucketnum; ++ dns_message_t *rmessage; + + UNUSED(task); /* for now */ + +@@ -4902,6 +4913,7 @@ validated(isc_task_t *task, isc_event_t *event) { + res = fctx->res; + addrinfo = valarg->addrinfo; + REQUIRE(!ISC_LIST_EMPTY(fctx->validators)); ++ rmessage = valarg->rmessage; + + vevent = (dns_validatorevent_t *)event; + fctx->vresult = vevent->result; +@@ -5033,7 +5045,8 @@ validated(isc_task_t *task, isc_event_t *event) { + dns_db_detachnode(fctx->cache, &node); + } + result = fctx->vresult; +- add_bad(fctx, addrinfo, result, badns_validation); ++ add_bad(fctx, rmessage, addrinfo, result, ++ badns_validation); + isc_event_free(&event); + UNLOCK(&res->buckets[bucketnum].lock); + INSIST(fctx->validator == NULL); +@@ -5072,7 +5085,7 @@ validated(isc_task_t *task, isc_event_t *event) { + /* + * Cache DS NXDOMAIN separately to other types. 
+ */ +- if (fctx->rmessage->rcode == dns_rcode_nxdomain && ++ if (rmessage->rcode == dns_rcode_nxdomain && + fctx->type != dns_rdatatype_ds) + covers = dns_rdatatype_any; + else +@@ -5093,7 +5106,7 @@ validated(isc_task_t *task, isc_event_t *event) { + covers == dns_rdatatype_any && res->zero_no_soa_ttl) + ttl = 0; + +- result = ncache_adderesult(fctx->rmessage, fctx->cache, node, ++ result = ncache_adderesult(rmessage, fctx->cache, node, + covers, now, ttl, vevent->optout, + vevent->secure, ardataset, &eresult); + if (result != ISC_R_SUCCESS) +@@ -5120,7 +5133,7 @@ validated(isc_task_t *task, isc_event_t *event) { + { + isc_result_t tresult; + dns_name_t *noqname = NULL; +- tresult = findnoqname(fctx, vevent->name, ++ tresult = findnoqname(fctx, rmessage, vevent->name, + vevent->rdataset->type, &noqname); + if (tresult == ISC_R_SUCCESS && noqname != NULL) { + tresult = dns_rdataset_addnoqname(vevent->rdataset, +@@ -5198,10 +5211,10 @@ validated(isc_task_t *task, isc_event_t *event) { + /* + * Cache any NS/NSEC records that happened to be validated. + */ +- result = dns_message_firstname(fctx->rmessage, DNS_SECTION_AUTHORITY); ++ result = dns_message_firstname(rmessage, DNS_SECTION_AUTHORITY); + while (result == ISC_R_SUCCESS) { + name = NULL; +- dns_message_currentname(fctx->rmessage, DNS_SECTION_AUTHORITY, ++ dns_message_currentname(rmessage, DNS_SECTION_AUTHORITY, + &name); + for (rdataset = ISC_LIST_HEAD(name->list); + rdataset != NULL; +@@ -5237,7 +5250,7 @@ validated(isc_task_t *task, isc_event_t *event) { + if (result != ISC_R_SUCCESS) + continue; + } +- result = dns_message_nextname(fctx->rmessage, ++ result = dns_message_nextname(rmessage, + DNS_SECTION_AUTHORITY); + } + +@@ -5297,7 +5310,8 @@ fctx_log(void *arg, int level, const char *fmt, ...) 
{ + } + + static inline isc_result_t +-findnoqname(fetchctx_t *fctx, dns_name_t *name, dns_rdatatype_t type, ++findnoqname(fetchctx_t *fctx, dns_message_t *rmessage, ++ dns_name_t *name, dns_rdatatype_t type, + dns_name_t **noqnamep) + { + dns_rdataset_t *nrdataset, *next, *sigrdataset; +@@ -5359,11 +5373,11 @@ findnoqname(fetchctx_t *fctx, dns_name_t *name, dns_rdatatype_t type, + #define NXND(x) ((x) == ISC_R_SUCCESS) + + section = DNS_SECTION_AUTHORITY; +- for (result = dns_message_firstname(fctx->rmessage, section); ++ for (result = dns_message_firstname(rmessage, section); + result == ISC_R_SUCCESS; +- result = dns_message_nextname(fctx->rmessage, section)) { ++ result = dns_message_nextname(rmessage, section)) { + dns_name_t *nsec = NULL; +- dns_message_currentname(fctx->rmessage, section, &nsec); ++ dns_message_currentname(rmessage, section, &nsec); + for (nrdataset = ISC_LIST_HEAD(nsec->list); + nrdataset != NULL; nrdataset = next) { + bool data = false, exists = false; +@@ -5422,7 +5436,8 @@ findnoqname(fetchctx_t *fctx, dns_name_t *name, dns_rdatatype_t type, + } + + static inline isc_result_t +-cache_name(fetchctx_t *fctx, dns_name_t *name, dns_adbaddrinfo_t *addrinfo, ++cache_name(fetchctx_t *fctx, dns_message_t *rmessage, ++ dns_name_t *name, dns_adbaddrinfo_t *addrinfo, + isc_stdtime_t now) + { + dns_rdataset_t *rdataset = NULL, *sigrdataset = NULL; +@@ -5657,7 +5672,8 @@ cache_name(fetchctx_t *fctx, dns_name_t *name, dns_adbaddrinfo_t *addrinfo, + { + isc_result_t tresult; + dns_name_t *noqname = NULL; +- tresult = findnoqname(fctx, name, ++ tresult = findnoqname(fctx, rmessage, ++ name, + rdataset->type, + &noqname); + if (tresult == ISC_R_SUCCESS && +@@ -5758,8 +5774,9 @@ cache_name(fetchctx_t *fctx, dns_name_t *name, dns_adbaddrinfo_t *addrinfo, + * having to remember which + * rdatasets needed validation. 
+ */ +- result = valcreate(fctx, addrinfo, +- name, rdataset->type, ++ result = valcreate(fctx, rmessage, ++ addrinfo, name, ++ rdataset->type, + rdataset, + sigrdataset, + valoptions, task); +@@ -5818,7 +5835,7 @@ cache_name(fetchctx_t *fctx, dns_name_t *name, dns_adbaddrinfo_t *addrinfo, + { + isc_result_t tresult; + dns_name_t *noqname = NULL; +- tresult = findnoqname(fctx, name, ++ tresult = findnoqname(fctx, rmessage, name, + rdataset->type, &noqname); + if (tresult == ISC_R_SUCCESS && + noqname != NULL) +@@ -5870,8 +5887,9 @@ cache_name(fetchctx_t *fctx, dns_name_t *name, dns_adbaddrinfo_t *addrinfo, + vtype = dns_rdatatype_dname; + } + } +- result = valcreate(fctx, addrinfo, name, vtype, valrdataset, +- valsigrdataset, valoptions, task); ++ result = valcreate(fctx, rmessage, addrinfo, name, vtype, ++ valrdataset, valsigrdataset, valoptions, ++ task); + } + + if (result == ISC_R_SUCCESS && have_answer) { +@@ -5907,7 +5925,8 @@ cache_name(fetchctx_t *fctx, dns_name_t *name, dns_adbaddrinfo_t *addrinfo, + } + + static inline isc_result_t +-cache_message(fetchctx_t *fctx, dns_adbaddrinfo_t *addrinfo, isc_stdtime_t now) ++cache_message(fetchctx_t *fctx, dns_message_t *rmessage, ++ dns_adbaddrinfo_t *addrinfo, isc_stdtime_t now) + { + isc_result_t result; + dns_section_t section; +@@ -5922,17 +5941,18 @@ cache_message(fetchctx_t *fctx, dns_adbaddrinfo_t *addrinfo, isc_stdtime_t now) + for (section = DNS_SECTION_ANSWER; + section <= DNS_SECTION_ADDITIONAL; + section++) { +- result = dns_message_firstname(fctx->rmessage, section); ++ result = dns_message_firstname(rmessage, section); + while (result == ISC_R_SUCCESS) { + name = NULL; +- dns_message_currentname(fctx->rmessage, section, ++ dns_message_currentname(rmessage, section, + &name); + if ((name->attributes & DNS_NAMEATTR_CACHE) != 0) { +- result = cache_name(fctx, name, addrinfo, now); ++ result = cache_name(fctx, rmessage, name, ++ addrinfo, now); + if (result != ISC_R_SUCCESS) + break; + } +- result = dns_message_nextname(fctx->rmessage, section); ++ result = dns_message_nextname(rmessage, section); + } + if (result != ISC_R_NOMORE) + break; +@@ -6002,8 +6022,9 @@ ncache_adderesult(dns_message_t *message, dns_db_t *cache, dns_dbnode_t *node, + } + + static inline isc_result_t +-ncache_message(fetchctx_t *fctx, dns_adbaddrinfo_t *addrinfo, +- dns_rdatatype_t covers, isc_stdtime_t now) ++ncache_message(fetchctx_t *fctx, dns_message_t *rmessage, ++ dns_adbaddrinfo_t *addrinfo, dns_rdatatype_t covers, ++ isc_stdtime_t now) + { + isc_result_t result, eresult; + dns_name_t *name; +@@ -6034,7 +6055,7 @@ ncache_message(fetchctx_t *fctx, dns_adbaddrinfo_t *addrinfo, + * XXXMPA remove when we follow cnames and adjust the setting + * of FCTX_ATTR_WANTNCACHE in noanswer_response(). + */ +- INSIST(fctx->rmessage->counts[DNS_SECTION_ANSWER] == 0); ++ INSIST(rmessage->counts[DNS_SECTION_ANSWER] == 0); + + /* + * Is DNSSEC validation required for this name? 
+@@ -6071,18 +6092,18 @@ ncache_message(fetchctx_t *fctx, dns_adbaddrinfo_t *addrinfo, + dns_rdataset_t *trdataset; + dns_name_t *tname; + +- result = dns_message_firstname(fctx->rmessage, ++ result = dns_message_firstname(rmessage, + DNS_SECTION_AUTHORITY); + while (result == ISC_R_SUCCESS) { + tname = NULL; +- dns_message_currentname(fctx->rmessage, ++ dns_message_currentname(rmessage, + DNS_SECTION_AUTHORITY, + &tname); + for (trdataset = ISC_LIST_HEAD(tname->list); + trdataset != NULL; + trdataset = ISC_LIST_NEXT(trdataset, link)) + trdataset->trust = dns_trust_pending_answer; +- result = dns_message_nextname(fctx->rmessage, ++ result = dns_message_nextname(rmessage, + DNS_SECTION_AUTHORITY); + } + if (result != ISC_R_NOMORE) +@@ -6094,7 +6115,7 @@ ncache_message(fetchctx_t *fctx, dns_adbaddrinfo_t *addrinfo, + /* + * Do negative response validation. + */ +- result = valcreate(fctx, addrinfo, name, fctx->type, ++ result = valcreate(fctx, rmessage, addrinfo, name, fctx->type, + NULL, NULL, valoptions, + res->buckets[fctx->bucketnum].task); + /* +@@ -6140,7 +6161,7 @@ ncache_message(fetchctx_t *fctx, dns_adbaddrinfo_t *addrinfo, + fctx->res->zero_no_soa_ttl) + ttl = 0; + +- result = ncache_adderesult(fctx->rmessage, fctx->cache, node, ++ result = ncache_adderesult(rmessage, fctx->cache, node, + covers, now, ttl, false, + false, ardataset, &eresult); + if (result != ISC_R_SUCCESS) +@@ -6201,7 +6222,9 @@ static isc_result_t + check_section(void *arg, dns_name_t *addname, dns_rdatatype_t type, + dns_section_t section) + { +- fetchctx_t *fctx = arg; ++ dns_chkarg_t *chkarg = arg; ++ fetchctx_t *fctx = chkarg->fctx; ++ dns_message_t *rmessage = chkarg->rmessage; + isc_result_t result; + dns_name_t *name = NULL; + dns_rdataset_t *rdataset = NULL; +@@ -6220,7 +6243,7 @@ check_section(void *arg, dns_name_t *addname, dns_rdatatype_t type, + (fctx->type == dns_rdatatype_ns && + dns_name_equal(&fctx->name, dns_rootname))); + +- result = dns_message_findname(fctx->rmessage, section, addname, ++ result = dns_message_findname(rmessage, section, addname, + dns_rdatatype_any, 0, &name, NULL); + if (result == ISC_R_SUCCESS) { + external = !dns_name_issubdomain(name, &fctx->domain); +@@ -6256,6 +6279,9 @@ check_section(void *arg, dns_name_t *addname, dns_rdatatype_t type, + } + } + ++ dns_message_detach(&chkarg->rmessage); ++ isc_mem_put(fctx->mctx, chkarg, sizeof(*chkarg)); ++ + return (ISC_R_SUCCESS); + } + +@@ -6275,7 +6301,7 @@ check_answer(void *arg, dns_name_t *addname, dns_rdatatype_t type) { + #endif + + static void +-chase_additional(fetchctx_t *fctx) { ++chase_additional(fetchctx_t *fctx, dns_message_t *rmessage) { + bool rescan; + dns_section_t section = DNS_SECTION_ADDITIONAL; + isc_result_t result; +@@ -6283,12 +6309,12 @@ chase_additional(fetchctx_t *fctx) { + again: + rescan = false; + +- for (result = dns_message_firstname(fctx->rmessage, section); ++ for (result = dns_message_firstname(rmessage, section); + result == ISC_R_SUCCESS; +- result = dns_message_nextname(fctx->rmessage, section)) { ++ result = dns_message_nextname(rmessage, section)) { + dns_name_t *name = NULL; + dns_rdataset_t *rdataset; +- dns_message_currentname(fctx->rmessage, DNS_SECTION_ADDITIONAL, ++ dns_message_currentname(rmessage, DNS_SECTION_ADDITIONAL, + &name); + if ((name->attributes & DNS_NAMEATTR_CHASE) == 0) + continue; +@@ -6297,10 +6323,15 @@ chase_additional(fetchctx_t *fctx) { + rdataset != NULL; + rdataset = ISC_LIST_NEXT(rdataset, link)) { + if (CHASE(rdataset)) { ++ dns_chkarg_t *chkarg; ++ chkarg = 
isc_mem_get(fctx->mctx, ++ sizeof(*chkarg)); ++ chkarg->fctx = fctx; ++ dns_message_attach(rmessage, &chkarg->rmessage); + rdataset->attributes &= ~DNS_RDATASETATTR_CHASE; + (void)dns_rdataset_additionaldata(rdataset, + check_related, +- fctx); ++ chkarg); + rescan = true; + } + } +@@ -6532,11 +6563,10 @@ trim_ns_ttl(fetchctx_t *fctx, dns_name_t *name, dns_rdataset_t *rdataset) { + #define LOOK_FOR_GLUE_IN_ANSWER 0x2 + + static isc_result_t +-noanswer_response(fetchctx_t *fctx, dns_name_t *oqname, +- unsigned int look_in_options) ++noanswer_response(fetchctx_t *fctx, dns_message_t *message, ++ dns_name_t *oqname, unsigned int look_in_options) + { + isc_result_t result; +- dns_message_t *message; + dns_name_t *name, *qname, *ns_name, *soa_name, *ds_name, *save_name; + dns_rdataset_t *rdataset, *ns_rdataset; + bool aa, negative_response; +@@ -6551,8 +6581,6 @@ noanswer_response(fetchctx_t *fctx, dns_name_t *oqname, + } else + section = DNS_SECTION_AUTHORITY; + +- message = fctx->rmessage; +- + /* + * Setup qname. + */ +@@ -6905,6 +6933,7 @@ noanswer_response(fetchctx_t *fctx, dns_name_t *oqname, + * we're not following a chain.) + */ + if (!negative_response && ns_name != NULL && oqname == NULL) { ++ dns_chkarg_t *chkarg; + /* + * We already know ns_name is a subdomain of fctx->domain. + * If ns_name is equal to fctx->domain, we're not making +@@ -6934,8 +6963,12 @@ noanswer_response(fetchctx_t *fctx, dns_name_t *oqname, + */ + INSIST(ns_rdataset != NULL); + FCTX_ATTR_SET(fctx, FCTX_ATTR_GLUING); ++ chkarg = isc_mem_get(fctx->mctx, ++ sizeof(*chkarg)); ++ chkarg->fctx = fctx; ++ dns_message_attach(message, &chkarg->rmessage); + (void)dns_rdataset_additionaldata(ns_rdataset, check_related, +- fctx); ++ chkarg); + #if CHECK_FOR_GLUE_IN_ANSWER + /* + * Look in the answer section for "glue" that is incorrectly +@@ -6946,9 +6979,14 @@ noanswer_response(fetchctx_t *fctx, dns_name_t *oqname, + */ + if ((look_in_options & LOOK_FOR_GLUE_IN_ANSWER) != 0 && + (fctx->type == dns_rdatatype_aaaa || +- fctx->type == dns_rdatatype_a)) ++ fctx->type == dns_rdatatype_a)) { ++ dns_chkarg_t *chkarg; ++ chkarg = isc_mem_get(fctx->mctx, sizeof(*chkarg)); ++ chkarg->fctx = fctx; ++ dns_message_attach(message, &chkarg->rmessage); + (void)dns_rdataset_additionaldata(ns_rdataset, +- check_answer, fctx); ++ check_answer, chkarg); ++ } + #endif + FCTX_ATTR_CLR(fctx, FCTX_ATTR_GLUING); + /* +@@ -7023,9 +7061,8 @@ validinanswer(dns_rdataset_t *rdataset, fetchctx_t *fctx) { + } + + static isc_result_t +-answer_response(fetchctx_t *fctx) { ++answer_response(fetchctx_t *fctx, dns_message_t *message) { + isc_result_t result; +- dns_message_t *message = NULL; + dns_name_t *name = NULL, *qname = NULL, *ns_name = NULL; + dns_name_t *aname = NULL, *cname = NULL, *dname = NULL; + dns_rdataset_t *rdataset = NULL, *sigrdataset = NULL; +@@ -7042,7 +7079,6 @@ answer_response(fetchctx_t *fctx) { + + FCTXTRACE("answer_response"); + +- message = fctx->rmessage; + qname = &fctx->name; + view = fctx->res->view; + type = fctx->type; +@@ -7151,6 +7187,7 @@ answer_response(fetchctx_t *fctx) { + rdataset != NULL; + rdataset = ISC_LIST_NEXT(rdataset, link)) + { ++ dns_chkarg_t *chkarg; + if (!validinanswer(rdataset, fctx)) { + return (DNS_R_FORMERR); + } +@@ -7178,11 +7215,17 @@ answer_response(fetchctx_t *fctx) { + rdataset->attributes |= DNS_RDATASETATTR_ANSWER; + rdataset->attributes |= DNS_RDATASETATTR_CACHE; + rdataset->trust = trust; ++ rdataset->attributes &= ~DNS_RDATASETATTR_CHASE; ++ chkarg = isc_mem_get(fctx->mctx, ++ 
sizeof(*chkarg)); ++ chkarg->fctx = fctx; ++ dns_message_attach(message, &chkarg->rmessage); + (void)dns_rdataset_additionaldata(rdataset, + check_related, +- fctx); ++ chkarg); + } + } else if (aname != NULL) { ++ dns_chkarg_t *chkarg; + if (!validinanswer(ardataset, fctx)) + return (DNS_R_FORMERR); + if ((ardataset->type == dns_rdatatype_a || +@@ -7204,8 +7247,12 @@ answer_response(fetchctx_t *fctx) { + ardataset->attributes |= DNS_RDATASETATTR_ANSWER; + ardataset->attributes |= DNS_RDATASETATTR_CACHE; + ardataset->trust = trust; ++ chkarg = isc_mem_get(fctx->mctx, ++ sizeof(*chkarg)); ++ chkarg->fctx = fctx; ++ dns_message_attach(message, &chkarg->rmessage); + (void)dns_rdataset_additionaldata(ardataset, check_related, +- fctx); ++ chkarg); + for (sigrdataset = ISC_LIST_HEAD(aname->list); + sigrdataset != NULL; + sigrdataset = ISC_LIST_NEXT(sigrdataset, link)) { +@@ -7343,6 +7390,7 @@ answer_response(fetchctx_t *fctx) { + if (rdataset->type == dns_rdatatype_ns || + (rdataset->type == dns_rdatatype_rrsig && + rdataset->covers == dns_rdatatype_ns)) { ++ dns_chkarg_t *chkarg; + name->attributes |= + DNS_NAMEATTR_CACHE; + rdataset->attributes |= +@@ -7364,10 +7412,15 @@ answer_response(fetchctx_t *fctx) { + * Mark any additional data related + * to this rdataset. + */ ++ chkarg = isc_mem_get(fctx->mctx, ++ sizeof(*chkarg)); ++ chkarg->fctx = fctx; ++ dns_message_attach(message, ++ &chkarg->rmessage); + (void)dns_rdataset_additionaldata( + rdataset, + check_related, +- fctx); ++ chkarg); + done = true; + } + } +@@ -7679,21 +7732,20 @@ log_nsid(isc_buffer_t *opt, size_t nsid_len, resquery_t *query, + } + + static bool +-iscname(fetchctx_t *fctx) { ++iscname(fetchctx_t *fctx, dns_message_t *rmessage) { + isc_result_t result; + +- result = dns_message_findname(fctx->rmessage, DNS_SECTION_ANSWER, ++ result = dns_message_findname(rmessage, DNS_SECTION_ANSWER, + &fctx->name, dns_rdatatype_cname, 0, + NULL, NULL); + return (result == ISC_R_SUCCESS ? 
true : false); + } + + static bool +-betterreferral(fetchctx_t *fctx) { ++betterreferral(fetchctx_t *fctx, dns_message_t *message) { + isc_result_t result; + dns_name_t *name; + dns_rdataset_t *rdataset; +- dns_message_t *message = fctx->rmessage; + + for (result = dns_message_firstname(message, DNS_SECTION_AUTHORITY); + result == ISC_R_SUCCESS; +@@ -7754,11 +7806,11 @@ process_opt(resquery_t *query, dns_rdataset_t *opt) { + } + optvalue = isc_buffer_current(&optbuf); + compute_cc(query, cookie, sizeof(cookie)); +- INSIST(query->fctx->rmessage->cc_bad == 0 && +- query->fctx->rmessage->cc_ok == 0); ++ INSIST(query->rmessage->cc_bad == 0 && ++ query->rmessage->cc_ok == 0); + if (optlen >= 8U && + memcmp(cookie, optvalue, 8) == 0) { +- query->fctx->rmessage->cc_ok = 1; ++ query->rmessage->cc_ok = 1; + inc_stats(query->fctx->res, + dns_resstatscounter_cookieok); + addrinfo = query->addrinfo; +@@ -7766,7 +7818,7 @@ process_opt(resquery_t *query, dns_rdataset_t *opt) { + addrinfo, optvalue, + optlen); + } else +- query->fctx->rmessage->cc_bad = 1; ++ query->rmessage->cc_bad = 1; + isc_buffer_forward(&optbuf, optlen); + inc_stats(query->fctx->res, + dns_resstatscounter_cookiein); +@@ -7900,7 +7952,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + goto done; + } + +- message = fctx->rmessage; ++ message = query->rmessage; + + if (query->tsig != NULL) { + result = dns_message_setquerytsig(message, query->tsig); +@@ -8092,7 +8144,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + case dns_rcode_refused: + case dns_rcode_servfail: + default: +- result = same_question(fctx); ++ result = same_question(fctx, message); + if (result != ISC_R_SUCCESS) { + FCTXTRACE3("response did not match question", result); + nextitem = true; +@@ -8397,7 +8449,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + } + + isc_buffer_init(&b, code, sizeof(code) - 1); +- dns_rcode_totext(fctx->rmessage->rcode, &b); ++ dns_rcode_totext(message->rcode, &b); + code[isc_buffer_usedlength(&b)] = '\0'; + FCTXTRACE2("remote server broken: returned ", code); + goto done; +@@ -8406,7 +8458,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + /* + * Is the server lame? + */ +- if (!ISFORWARDER(query->addrinfo) && is_lame(fctx)) { ++ if (!ISFORWARDER(query->addrinfo) && is_lame(fctx, message)) { + inc_stats(res, dns_resstatscounter_lame); + log_lame(fctx, query->addrinfo); + if (res->lame_ttl != 0) { +@@ -8477,10 +8529,10 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + if ((message->flags & DNS_MESSAGEFLAG_AA) != 0 || + ISFORWARDER(query->addrinfo)) + { +- result = answer_response(fctx); ++ result = answer_response(fctx, message); + if (result != ISC_R_SUCCESS) + FCTXTRACE3("answer_response (AA/fwd)", result); +- } else if (iscname(fctx) && ++ } else if (iscname(fctx, message) && + fctx->type != dns_rdatatype_any && + fctx->type != dns_rdatatype_cname) + { +@@ -8489,16 +8541,16 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + * answer when a CNAME is followed. We should treat + * it as a valid answer. + */ +- result = answer_response(fctx); ++ result = answer_response(fctx, message); + if (result != ISC_R_SUCCESS) + FCTXTRACE3("answer_response (!ANY/!CNAME)", + result); + } else if (fctx->type != dns_rdatatype_ns && +- !betterreferral(fctx)) { ++ !betterreferral(fctx, message)) { + /* + * Lame response !!!. 
+ */ +- result = answer_response(fctx); ++ result = answer_response(fctx, message); + if (result != ISC_R_SUCCESS) + FCTXTRACE3("answer_response (!NS)", result); + } else { +@@ -8511,8 +8563,10 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + * validation, we must invoke the following + * special kludge to treat it as a referral. + */ +- result = noanswer_response(fctx, NULL, +- LOOK_FOR_NS_IN_ANSWER); ++ result = noanswer_response(fctx, ++ message, ++ NULL, ++ LOOK_FOR_NS_IN_ANSWER); + if (result != ISC_R_SUCCESS) + FCTXTRACE3("noanswer_response (NS)", + result); +@@ -8529,7 +8583,9 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + * record. LOOK_FOR_GLUE_IN_ANSWER will handle + * such a corner case. + */ +- result = noanswer_response(fctx, NULL, ++ result = noanswer_response(fctx, ++ message, ++ NULL, + LOOK_FOR_GLUE_IN_ANSWER); + if (result != ISC_R_SUCCESS) + FCTXTRACE3("noanswer_response", result); +@@ -8561,7 +8617,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + /* + * NXDOMAIN, NXRDATASET, or referral. + */ +- result = noanswer_response(fctx, NULL, 0); ++ result = noanswer_response(fctx, message, NULL, 0); + switch (result) { + case ISC_R_SUCCESS: + case DNS_R_CHASEDSSERVERS: +@@ -8617,14 +8673,15 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + /* + * Follow additional section data chains. + */ +- chase_additional(fctx); ++ chase_additional(fctx, message); + + /* + * Cache the cacheable parts of the message. This may also cause + * work to be queued to the DNSSEC validator. + */ + if (WANTCACHE(fctx)) { +- result = cache_message(fctx, query->addrinfo, now); ++ result = cache_message(fctx, message, query->addrinfo, ++ now); + if (result != ISC_R_SUCCESS) { + FCTXTRACE3("cache_message complete", result); + goto done; +@@ -8650,7 +8707,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + /* + * Cache any negative cache entries in the message. + */ +- result = ncache_message(fctx, query->addrinfo, covers, now); ++ result = ncache_message(fctx, message, query->addrinfo, covers, now); + if (result != ISC_R_SUCCESS) + FCTXTRACE3("ncache_message complete", result); + } +@@ -8689,7 +8746,8 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + * Add this server to the list of bad servers for + * this fctx. 
+ */ +- add_bad(fctx, addrinfo, broken_server, broken_type); ++ add_bad(fctx, message, addrinfo, ++ broken_server, broken_type); + } + + if (get_nameservers) { +@@ -8776,7 +8834,6 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + FCTXTRACE("nextitem"); + inc_stats(fctx->res, dns_resstatscounter_nextitem); + INSIST(query->dispentry != NULL); +- dns_message_reset(fctx->rmessage, DNS_MESSAGE_INTENTPARSE); + result = dns_dispatch_getnext(query->dispentry, &devent); + if (result != ISC_R_SUCCESS) + fctx_done(fctx, result, __LINE__); +@@ -8796,7 +8853,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + fctx_done(fctx, result, __LINE__); + } else if (result == DNS_R_CHASEDSSERVERS) { + unsigned int n; +- add_bad(fctx, addrinfo, result, broken_type); ++ add_bad(fctx, query->rmessage, addrinfo, result, broken_type); + fctx_cancelqueries(fctx, true, false); + fctx_cleanupfinds(fctx); + fctx_cleanupforwaddrs(fctx); +-- +2.23.0 + diff --git a/backport-0063-Properly-handling-dns_message_t-shared-references.patch b/backport-0063-Properly-handling-dns_message_t-shared-references.patch new file mode 100644 index 0000000..0d24511 --- /dev/null +++ b/backport-0063-Properly-handling-dns_message_t-shared-references.patch @@ -0,0 +1,837 @@ +From 79c62017eba6b4e81b24e87267ce75689ef89447 Mon Sep 17 00:00:00 2001 +From: Diego Fronza +Date: Mon, 21 Sep 2020 17:44:29 -0300 +Subject: [PATCH] Properly handling dns_message_t shared references + +This commit fixes the problems that arose when moving the dns_message_t +object from fetchctx_t to the query structure. + +Since the lifetime of query objects is different from that of a +fetchctx, and the dns_message_t object held by the query may still be +used by some external module, e.g. the validator, even after the query may +have been destroyed, proper handling of the references to the message +was added in this commit to avoid accessing an already destroyed +object. + +Specifically, in resquery_response(), a reference to the message is +attached at the beginning of the function and detached at the end, since +a possible call to fctx_cancelquery() would release the dns_message_t +object, and in the next lines of code a call to add_bad() would require +a valid pointer to the same object. + +In valcreate() a new reference is attached to the message object; this +ensures that if the corresponding query object is destroyed before the +validator attempts to access it, no invalid pointer access occurs. + +In validated() we have to attach a new reference to the message, since +we destroy the validator object at the beginning of the function, and we +need access to the message in the next lines of the same function.
+Conflict: adapt is_lame function, remove irrelevant code +Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/79c62017eba6b4e81b24e87267ce75689ef89447 +--- + lib/dns/resolver.c | 266 ++++++++++++++++++++---------------------- + 7 files changed, 517 insertions(+), 268 deletions(-) + +diff --git a/lib/dns/resolver.c b/lib/dns/resolver.c +index ec5293baad..e3f9aef95c 100644 +--- a/lib/dns/resolver.c ++++ b/lib/dns/resolver.c +@@ -629,7 +629,7 @@ valcreate(fetchctx_t *fctx, dns_message_t *rmessage, + + valarg->fctx = fctx; + valarg->addrinfo = addrinfo; +- valarg->rmessage = rmessage; ++ dns_message_attach(rmessage, &valarg->rmessage); + + if (!ISC_LIST_EMPTY(fctx->validators)) + valoptions |= DNS_VALIDATOR_DEFER; +@@ -647,8 +647,10 @@ valcreate(fetchctx_t *fctx, dns_message_t *rmessage, + fctx->validator = validator; + } + ISC_LIST_APPEND(fctx->validators, validator, link); +- } else ++ } else { ++ dns_message_detach(&valarg->rmessage); + isc_mem_put(fctx->mctx, valarg, sizeof(*valarg)); ++ } + return (result); + } + +@@ -1004,7 +1006,7 @@ fctx_cancelquery(resquery_t **queryp, dns_dispatchevent_t **deventp, + ISFORWARDER(query->addrinfo)) + { + add_bad(fctx, query->rmessage, +- query->addrinfo, ISC_R_TIMEDOUT, ++ query->addrinfo, ISC_R_TIMEDOUT, + badns_forwarder); + } + +@@ -1736,7 +1738,7 @@ fctx_query(fetchctx_t *fctx, dns_adbaddrinfo_t *addrinfo, + } + query->rmessage = NULL; + result = dns_message_create(fctx->mctx, DNS_MESSAGE_INTENTPARSE, +- &query->rmessage); ++ &query->rmessage); + if (result != ISC_R_SUCCESS) { + goto cleanup_query; + } +@@ -4897,7 +4899,7 @@ validated(isc_task_t *task, isc_event_t *event) { + uint32_t ttl; + unsigned options; + uint32_t bucketnum; +- dns_message_t *rmessage; ++ dns_message_t *rmessage = NULL; + + UNUSED(task); /* for now */ + +@@ -4908,7 +4910,7 @@ validated(isc_task_t *task, isc_event_t *event) { + res = fctx->res; + addrinfo = valarg->addrinfo; + REQUIRE(!ISC_LIST_EMPTY(fctx->validators)); +- rmessage = valarg->rmessage; ++ dns_message_attach(valarg->rmessage, &rmessage); + + vevent = (dns_validatorevent_t *)event; + fctx->vresult = vevent->result; +@@ -4927,6 +4929,7 @@ validated(isc_task_t *task, isc_event_t *event) { + * destroy the fctx if necessary. + */ + dns_validator_destroy(&vevent->validator); ++ dns_message_detach(&valarg->rmessage); + isc_mem_put(fctx->mctx, valarg, sizeof(*valarg)); + + negative = (vevent->rdataset == NULL); +@@ -5065,9 +5068,10 @@ validated(isc_task_t *task, isc_event_t *event) { + dns_resolver_addbadcache(res, &fctx->name, + fctx->type, &expire); + fctx_done(fctx, result, __LINE__); /* Locks bucket. */ +- } else ++ } else { + fctx_try(fctx, true, true); /* Locks bucket.
*/ +- return; ++ } ++ goto cleanup_rmessage; + } + + +@@ -5287,6 +5291,8 @@ validated(isc_task_t *task, isc_event_t *event) { + cleanup_event: + INSIST(node == NULL); + isc_event_free(&event); ++ cleanup_rmessage: ++ dns_message_detach(&rmessage); + } + + static void +@@ -6274,9 +6280,6 @@ check_section(void *arg, dns_name_t *addname, dns_rdatatype_t type, + } + } + +- dns_message_detach(&chkarg->rmessage); +- isc_mem_put(fctx->mctx, chkarg, sizeof(*chkarg)); +- + return (ISC_R_SUCCESS); + } + +@@ -6318,15 +6321,13 @@ chase_additional(fetchctx_t *fctx, dns_message_t *rmessage) { + rdataset != NULL; + rdataset = ISC_LIST_NEXT(rdataset, link)) { + if (CHASE(rdataset)) { +- dns_chkarg_t *chkarg; +- chkarg = isc_mem_get(fctx->mctx, +- sizeof(*chkarg)); +- chkarg->fctx = fctx; +- dns_message_attach(rmessage, &chkarg->rmessage); ++ dns_chkarg_t chkarg; ++ chkarg.fctx = fctx; ++ chkarg.rmessage = rmessage; + rdataset->attributes &= ~DNS_RDATASETATTR_CHASE; + (void)dns_rdataset_additionaldata(rdataset, + check_related, +- chkarg); ++ &chkarg); + rescan = true; + } + } +@@ -6928,7 +6929,7 @@ noanswer_response(fetchctx_t *fctx, dns_message_t *message, + * we're not following a chain.) + */ + if (!negative_response && ns_name != NULL && oqname == NULL) { +- dns_chkarg_t *chkarg; ++ dns_chkarg_t chkarg; + /* + * We already know ns_name is a subdomain of fctx->domain. + * If ns_name is equal to fctx->domain, we're not making +@@ -6958,12 +6959,10 @@ noanswer_response(fetchctx_t *fctx, dns_message_t *message, + */ + INSIST(ns_rdataset != NULL); + FCTX_ATTR_SET(fctx, FCTX_ATTR_GLUING); +- chkarg = isc_mem_get(fctx->mctx, +- sizeof(*chkarg)); +- chkarg->fctx = fctx; +- dns_message_attach(message, &chkarg->rmessage); ++ chkarg.fctx = fctx; ++ chkarg.rmessage = message; + (void)dns_rdataset_additionaldata(ns_rdataset, check_related, +- chkarg); ++ &chkarg); + #if CHECK_FOR_GLUE_IN_ANSWER + /* + * Look in the answer section for "glue" that is incorrectly +@@ -6975,12 +6974,11 @@ noanswer_response(fetchctx_t *fctx, dns_message_t *message, + if ((look_in_options & LOOK_FOR_GLUE_IN_ANSWER) != 0 && + (fctx->type == dns_rdatatype_aaaa || + fctx->type == dns_rdatatype_a)) { +- dns_chkarg_t *chkarg; +- chkarg = isc_mem_get(fctx->mctx, sizeof(*chkarg)); +- chkarg->fctx = fctx; +- dns_message_attach(message, &chkarg->rmessage); ++ dns_chkarg_t chkarg; ++ chkarg.fctx = fctx; ++ chkarg.rmessage = message; + (void)dns_rdataset_additionaldata(ns_rdataset, +- check_answer, chkarg); ++ check_answer, &chkarg); + } + #endif + FCTX_ATTR_CLR(fctx, FCTX_ATTR_GLUING); +@@ -7182,7 +7180,7 @@ answer_response(fetchctx_t *fctx, dns_message_t *message) { + rdataset != NULL; + rdataset = ISC_LIST_NEXT(rdataset, link)) + { +- dns_chkarg_t *chkarg; ++ dns_chkarg_t chkarg; + if (!validinanswer(rdataset, fctx)) { + return (DNS_R_FORMERR); + } +@@ -7211,16 +7209,14 @@ answer_response(fetchctx_t *fctx, dns_message_t *message) { + rdataset->attributes |= DNS_RDATASETATTR_CACHE; + rdataset->trust = trust; + rdataset->attributes &= ~DNS_RDATASETATTR_CHASE; +- chkarg = isc_mem_get(fctx->mctx, +- sizeof(*chkarg)); +- chkarg->fctx = fctx; +- dns_message_attach(message, &chkarg->rmessage); ++ chkarg.fctx = fctx; ++ chkarg.rmessage = message; + (void)dns_rdataset_additionaldata(rdataset, + check_related, +- chkarg); ++ &chkarg); + } + } else if (aname != NULL) { +- dns_chkarg_t *chkarg; ++ dns_chkarg_t chkarg; + if (!validinanswer(ardataset, fctx)) + return (DNS_R_FORMERR); + if ((ardataset->type == dns_rdatatype_a || +@@ -7242,12 +7238,10 @@ 
answer_response(fetchctx_t *fctx, dns_message_t *message) { + ardataset->attributes |= DNS_RDATASETATTR_ANSWER; + ardataset->attributes |= DNS_RDATASETATTR_CACHE; + ardataset->trust = trust; +- chkarg = isc_mem_get(fctx->mctx, +- sizeof(*chkarg)); +- chkarg->fctx = fctx; +- dns_message_attach(message, &chkarg->rmessage); ++ chkarg.fctx = fctx; ++ chkarg.rmessage = message; + (void)dns_rdataset_additionaldata(ardataset, check_related, +- chkarg); ++ &chkarg); + for (sigrdataset = ISC_LIST_HEAD(aname->list); + sigrdataset != NULL; + sigrdataset = ISC_LIST_NEXT(sigrdataset, link)) { +@@ -7385,7 +7379,7 @@ answer_response(fetchctx_t *fctx, dns_message_t *message) { + if (rdataset->type == dns_rdatatype_ns || + (rdataset->type == dns_rdatatype_rrsig && + rdataset->covers == dns_rdatatype_ns)) { +- dns_chkarg_t *chkarg; ++ dns_chkarg_t chkarg; + name->attributes |= + DNS_NAMEATTR_CACHE; + rdataset->attributes |= +@@ -7407,15 +7401,12 @@ answer_response(fetchctx_t *fctx, dns_message_t *message) { + * Mark any additional data related + * to this rdataset. + */ +- chkarg = isc_mem_get(fctx->mctx, +- sizeof(*chkarg)); +- chkarg->fctx = fctx; +- dns_message_attach(message, +- &chkarg->rmessage); ++ chkarg.fctx = fctx; ++ chkarg.rmessage = message; + (void)dns_rdataset_additionaldata( + rdataset, + check_related, +- chkarg); ++ &chkarg); + done = true; + } + } +@@ -7835,7 +7826,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + dns_dispatchevent_t *devent = (dns_dispatchevent_t *)event; + bool keep_trying, get_nameservers, resend, nextitem; + bool truncated; +- dns_message_t *message; ++ dns_message_t *rmessage = NULL; + dns_rdataset_t *opt; + fetchctx_t *fctx; + dns_name_t *fname; +@@ -7866,6 +7857,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + options = query->options; + REQUIRE(VALID_FCTX(fctx)); + REQUIRE(event->ev_type == DNS_EVENT_DISPATCH); ++ dns_message_attach(query->rmessage, &rmessage); + + QTRACE("response"); + +@@ -7947,10 +7939,8 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + goto done; + } + +- message = query->rmessage; +- + if (query->tsig != NULL) { +- result = dns_message_setquerytsig(message, query->tsig); ++ result = dns_message_setquerytsig(rmessage, query->tsig); + if (result != ISC_R_SUCCESS) { + FCTXTRACE3("unable to set query tsig", result); + goto done; +@@ -7958,14 +7948,14 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + } + + if (query->tsigkey) { +- result = dns_message_settsigkey(message, query->tsigkey); ++ result = dns_message_settsigkey(rmessage, query->tsigkey); + if (result != ISC_R_SUCCESS) { + FCTXTRACE3("unable to set tsig key", result); + goto done; + } + } + +- dns_message_setclass(message, res->rdclass); ++ dns_message_setclass(rmessage, res->rdclass); + + if ((options & DNS_FETCHOPT_TCP) == 0) { + if ((options & DNS_FETCHOPT_NOEDNS0) == 0) +@@ -7974,16 +7964,16 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + else + dns_adb_plainresponse(fctx->adb, query->addrinfo); + } +- result = dns_message_parse(message, &devent->buffer, 0); ++ result = dns_message_parse(rmessage, &devent->buffer, 0); + if (result != ISC_R_SUCCESS) { + FCTXTRACE3("message failed to parse", result); + switch (result) { + case ISC_R_UNEXPECTEDEND: +- if (!message->question_ok || +- (message->flags & DNS_MESSAGEFLAG_TC) == 0 || ++ if (!rmessage->question_ok || ++ (rmessage->flags & DNS_MESSAGEFLAG_TC) == 0 || + (options & DNS_FETCHOPT_TCP) != 0) { + /* +- * Either the message ended prematurely, ++ * Either the 
rmessage ended prematurely, + * and/or wasn't marked as being truncated, + * and/or this is a response to a query we + * sent over TCP. In all of these cases, +@@ -8012,7 +8002,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + } + /* + * We defer retrying via TCP for a bit so we can +- * check out this message further. ++ * check out this rmessage further. + */ + truncated = true; + break; +@@ -8043,7 +8033,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + /* + * Log the incoming packet. + */ +- dns_message_logfmtpacket2(message, "received packet from", ++ dns_message_logfmtpacket2(rmessage, "received packet from", + &query->addrinfo->sockaddr, + DNS_LOGCATEGORY_RESOLVER, + DNS_LOGMODULE_PACKETS, +@@ -8089,7 +8079,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + &zr, &query->start, NULL, &devent->buffer); + #endif /* HAVE_DNSTAP */ + +- if (message->rdclass != res->rdclass) { ++ if (rmessage->rdclass != res->rdclass) { + resend = true; + FCTXTRACE("bad class"); + goto done; +@@ -8098,11 +8088,11 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + /* + * Process receive opt record. + */ +- opt = dns_message_getopt(message); ++ opt = dns_message_getopt(rmessage); + if (opt != NULL) + process_opt(query, opt); + +- if (message->cc_bad && (options & DNS_FETCHOPT_TCP) == 0) { ++ if (rmessage->cc_bad && (options & DNS_FETCHOPT_TCP) == 0) { + /* + * If the COOKIE is bad, assume it is an attack and + * keep listening for a good answer. +@@ -8125,10 +8115,10 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + * the same question. + * FORMERR/NOTIMP if they have a question section then it must match. + */ +- switch (message->rcode) { ++ switch (rmessage->rcode) { + case dns_rcode_notimp: + case dns_rcode_formerr: +- if (message->counts[DNS_SECTION_QUESTION] == 0) ++ if (rmessage->counts[DNS_SECTION_QUESTION] == 0) + break; + /* FALLTHROUGH */ + case dns_rcode_nxrrset: /* Not expected. */ +@@ -8139,7 +8129,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + case dns_rcode_refused: + case dns_rcode_servfail: + default: +- result = same_question(fctx, message); ++ result = same_question(fctx, rmessage); + if (result != ISC_R_SUCCESS) { + FCTXTRACE3("response did not match question", result); + nextitem = true; +@@ -8149,10 +8139,10 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + } + + /* +- * If the message is signed, check the signature. If not, this ++ * If the rmessage is signed, check the signature. If not, this + * returns success anyway. + */ +- result = dns_message_checksig(message, res->view); ++ result = dns_message_checksig(rmessage, res->view); + if (result != ISC_R_SUCCESS) { + FCTXTRACE3("signature check failed", result); + goto done; +@@ -8161,12 +8151,12 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + /* + * The dispatcher should ensure we only get responses with QR set. + */ +- INSIST((message->flags & DNS_MESSAGEFLAG_QR) != 0); ++ INSIST((rmessage->flags & DNS_MESSAGEFLAG_QR) != 0); + /* +- * INSIST() that the message comes from the place we sent it to, ++ * INSIST() that the rmessage comes from the place we sent it to, + * since the dispatch code should ensure this. + * +- * INSIST() that the message id is correct (this should also be ++ * INSIST() that the rmessage id is correct (this should also be + * ensured by the dispatch code). + */ + +@@ -8177,12 +8167,12 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + * EDNS support. 
+ */ + if (opt == NULL && !EDNSOK(query->addrinfo) && +- (message->rcode == dns_rcode_noerror || +- message->rcode == dns_rcode_nxdomain || +- message->rcode == dns_rcode_refused || +- message->rcode == dns_rcode_yxdomain) && ++ (rmessage->rcode == dns_rcode_noerror || ++ rmessage->rcode == dns_rcode_nxdomain || ++ rmessage->rcode == dns_rcode_refused || ++ rmessage->rcode == dns_rcode_yxdomain) && + bad_edns(fctx, &query->addrinfo->sockaddr)) { +- dns_message_logpacket2(message, ++ dns_message_logpacket2(rmessage, + "received packet (bad edns) from", + &query->addrinfo->sockaddr, + DNS_LOGCATEGORY_RESOLVER, +@@ -8192,10 +8182,10 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + dns_adb_changeflags(fctx->adb, query->addrinfo, + DNS_FETCHOPT_NOEDNS0, + DNS_FETCHOPT_NOEDNS0); +- } else if (opt == NULL && (message->flags & DNS_MESSAGEFLAG_TC) == 0 && ++ } else if (opt == NULL && (rmessage->flags & DNS_MESSAGEFLAG_TC) == 0 && + !EDNSOK(query->addrinfo) && +- (message->rcode == dns_rcode_noerror || +- message->rcode == dns_rcode_nxdomain) && ++ (rmessage->rcode == dns_rcode_noerror || ++ rmessage->rcode == dns_rcode_nxdomain) && + (query->options & DNS_FETCHOPT_NOEDNS0) == 0) { + /* + * We didn't get a OPT record in response to a EDNS query. +@@ -8208,7 +8198,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + * should be safe to do for any rcode we limit it to NOERROR + * and NXDOMAIN. + */ +- dns_message_logpacket2(message, "received packet (no opt) from", ++ dns_message_logpacket2(rmessage, "received packet (no opt) from", + &query->addrinfo->sockaddr, + DNS_LOGCATEGORY_RESOLVER, + DNS_LOGMODULE_RESOLVER, +@@ -8224,10 +8214,10 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + */ + if (opt != NULL && !EDNSOK(query->addrinfo) && + (query->options & DNS_FETCHOPT_NOEDNS0) == 0 && +- (message->rcode == dns_rcode_noerror || +- message->rcode == dns_rcode_nxdomain || +- message->rcode == dns_rcode_refused || +- message->rcode == dns_rcode_yxdomain)) { ++ (rmessage->rcode == dns_rcode_noerror || ++ rmessage->rcode == dns_rcode_nxdomain || ++ rmessage->rcode == dns_rcode_refused || ++ rmessage->rcode == dns_rcode_yxdomain)) { + dns_adb_changeflags(fctx->adb, query->addrinfo, + FCTX_ADDRINFO_EDNSOK, + FCTX_ADDRINFO_EDNSOK); +@@ -8236,7 +8226,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + /* + * Deal with truncated responses by retrying using TCP. + */ +- if ((message->flags & DNS_MESSAGEFLAG_TC) != 0) ++ if ((rmessage->flags & DNS_MESSAGEFLAG_TC) != 0) + truncated = true; + + if (truncated) { +@@ -8255,19 +8245,19 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + /* + * Is it a query response? + */ +- if (message->opcode != dns_opcode_query) { ++ if (rmessage->opcode != dns_opcode_query) { + /* XXXRTH Log */ + broken_server = DNS_R_UNEXPECTEDOPCODE; + keep_trying = true; +- FCTXTRACE("invalid message opcode"); ++ FCTXTRACE("invalid rmessage opcode"); + goto done; + } + + /* + * Update statistics about erroneous responses. + */ +- if (message->rcode != dns_rcode_noerror) { +- switch (message->rcode) { ++ if (rmessage->rcode != dns_rcode_noerror) { ++ switch (rmessage->rcode) { + case dns_rcode_nxdomain: + inc_stats(res, dns_resstatscounter_nxdomain); + break; +@@ -8295,9 +8285,9 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + /* + * Is the remote server broken, or does it dislike us? 
+ */ +- if (message->rcode != dns_rcode_noerror && +- message->rcode != dns_rcode_yxdomain && +- message->rcode != dns_rcode_nxdomain) { ++ if (rmessage->rcode != dns_rcode_noerror && ++ rmessage->rcode != dns_rcode_yxdomain && ++ rmessage->rcode != dns_rcode_nxdomain) { + isc_buffer_t b; + char code[64]; + unsigned char cookie[64]; +@@ -8306,19 +8296,19 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + * Some servers do not ignore unknown EDNS options. + */ + if (!NOCOOKIE(query->addrinfo) && +- (message->rcode == dns_rcode_formerr || +- message->rcode == dns_rcode_notimp || +- message->rcode == dns_rcode_refused) && ++ (rmessage->rcode == dns_rcode_formerr || ++ rmessage->rcode == dns_rcode_notimp || ++ rmessage->rcode == dns_rcode_refused) && + dns_adb_getcookie(fctx->adb, query->addrinfo, + cookie, sizeof(cookie)) == 0U) { + dns_adb_changeflags(fctx->adb, query->addrinfo, + FCTX_ADDRINFO_NOCOOKIE, + FCTX_ADDRINFO_NOCOOKIE); + resend = true; +- } else if ((message->rcode == dns_rcode_formerr || +- message->rcode == dns_rcode_notimp || +- (message->rcode == dns_rcode_servfail && +- dns_message_getopt(message) == NULL)) && ++ } else if ((rmessage->rcode == dns_rcode_formerr || ++ rmessage->rcode == dns_rcode_notimp || ++ (rmessage->rcode == dns_rcode_servfail && ++ dns_message_getopt(rmessage) == NULL)) && + (query->options & DNS_FETCHOPT_NOEDNS0) == 0) { + /* + * It's very likely they don't like EDNS0. +@@ -8338,7 +8328,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + */ + add_bad_edns(fctx, &query->addrinfo->sockaddr); + inc_stats(res, dns_resstatscounter_edns0fail); +- } else if (message->rcode == dns_rcode_formerr) { ++ } else if (rmessage->rcode == dns_rcode_formerr) { + if (ISFORWARDER(query->addrinfo)) { + /* + * This forwarder doesn't understand us, +@@ -8358,7 +8348,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + log_formerr(fctx, "server sent FORMERR"); + result = DNS_R_FORMERR; + } +- } else if (message->rcode == dns_rcode_badvers) { ++ } else if (rmessage->rcode == dns_rcode_badvers) { + unsigned int version; + bool setnocookie = false; + #if DNS_EDNS_VERSION > 0 +@@ -8425,8 +8415,8 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + keep_trying = true; + } + #endif +- } else if (message->rcode == dns_rcode_badcookie && +- message->cc_ok) { ++ } else if (rmessage->rcode == dns_rcode_badcookie && ++ rmessage->cc_ok) { + /* + * We have recorded the new cookie. + */ +@@ -8444,7 +8434,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + } + + isc_buffer_init(&b, code, sizeof(code) - 1); +- dns_rcode_totext(message->rcode, &b); ++ dns_rcode_totext(rmessage->rcode, &b); + code[isc_buffer_usedlength(&b)] = '\0'; + FCTXTRACE2("remote server broken: returned ", code); + goto done; +@@ -8454,7 +8444,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + /* + * Is the server lame? 
+ */ +- if (!ISFORWARDER(query->addrinfo) && is_lame(fctx, message)) { ++ if (!ISFORWARDER(query->addrinfo) && is_lame(fctx, rmessage)) { + inc_stats(res, dns_resstatscounter_lame); + log_lame(fctx, query->addrinfo); + if (res->lame_ttl != 0) { +@@ -8477,7 +8467,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + if (!ISFORWARDER(query->addrinfo) && + dns_view_isdelegationonly(res->view, &fctx->domain) && + !dns_name_equal(&fctx->domain, &fctx->name) && +- fix_mustbedelegationornxdomain(message, fctx)) { ++ fix_mustbedelegationornxdomain(rmessage, fctx)) { + char namebuf[DNS_NAME_FORMATSIZE]; + char domainbuf[DNS_NAME_FORMATSIZE]; + char addrbuf[ISC_SOCKADDR_FORMATSIZE]; +@@ -8500,7 +8490,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + } + + if ((res->options & DNS_RESOLVER_CHECKNAMES) != 0) +- checknames(message); ++ checknames(rmessage); + + /* + * Clear cache bits. +@@ -8510,22 +8500,22 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + /* + * Did we get any answers? + */ +- if (message->counts[DNS_SECTION_ANSWER] > 0 && +- (message->rcode == dns_rcode_noerror || +- message->rcode == dns_rcode_yxdomain || +- message->rcode == dns_rcode_nxdomain)) { ++ if (rmessage->counts[DNS_SECTION_ANSWER] > 0 && ++ (rmessage->rcode == dns_rcode_noerror || ++ rmessage->rcode == dns_rcode_yxdomain || ++ rmessage->rcode == dns_rcode_nxdomain)) { + /* + * [normal case] + * We've got answers. If it has an authoritative answer or an + * answer from a forwarder, we're done. + */ +- if ((message->flags & DNS_MESSAGEFLAG_AA) != 0 || ++ if ((rmessage->flags & DNS_MESSAGEFLAG_AA) != 0 || + ISFORWARDER(query->addrinfo)) + { +- result = answer_response(fctx, message); ++ result = answer_response(fctx, rmessage); + if (result != ISC_R_SUCCESS) + FCTXTRACE3("answer_response (AA/fwd)", result); +- } else if (iscname(fctx, message) && ++ } else if (iscname(fctx, rmessage) && + fctx->type != dns_rdatatype_any && + fctx->type != dns_rdatatype_cname) + { +@@ -8534,16 +8524,16 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + * answer when a CNAME is followed. We should treat + * it as a valid answer. + */ +- result = answer_response(fctx, message); ++ result = answer_response(fctx, rmessage); + if (result != ISC_R_SUCCESS) + FCTXTRACE3("answer_response (!ANY/!CNAME)", + result); + } else if (fctx->type != dns_rdatatype_ns && +- !betterreferral(fctx, message)) { ++ !betterreferral(fctx, rmessage)) { + /* + * Lame response !!!. + */ +- result = answer_response(fctx, message); ++ result = answer_response(fctx, rmessage); + if (result != ISC_R_SUCCESS) + FCTXTRACE3("answer_response (!NS)", result); + } else { +@@ -8557,7 +8547,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + * special kludge to treat it as a referral. + */ + result = noanswer_response(fctx, +- message, ++ rmessage, + NULL, + LOOK_FOR_NS_IN_ANSWER); + if (result != ISC_R_SUCCESS) +@@ -8577,7 +8567,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + * such a corner case. 
+ */ + result = noanswer_response(fctx, +- message, ++ rmessage, + NULL, + LOOK_FOR_GLUE_IN_ANSWER); + if (result != ISC_R_SUCCESS) +@@ -8602,13 +8592,13 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + keep_trying = true; + goto done; + } +- } else if (message->counts[DNS_SECTION_AUTHORITY] > 0 || +- message->rcode == dns_rcode_noerror || +- message->rcode == dns_rcode_nxdomain) { ++ } else if (rmessage->counts[DNS_SECTION_AUTHORITY] > 0 || ++ rmessage->rcode == dns_rcode_noerror || ++ rmessage->rcode == dns_rcode_nxdomain) { + /* + * NXDOMAIN, NXRDATASET, or referral. + */ +- result = noanswer_response(fctx, message, NULL, 0); ++ result = noanswer_response(fctx, rmessage, NULL, 0); + switch (result) { + case ISC_R_SUCCESS: + case DNS_R_CHASEDSSERVERS: +@@ -8664,14 +8654,14 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + /* + * Follow additional section data chains. + */ +- chase_additional(fctx, message); ++ chase_additional(fctx, rmessage); + + /* +- * Cache the cacheable parts of the message. This may also cause ++ * Cache the cacheable parts of the rmessage. This may also cause + * work to be queued to the DNSSEC validator. + */ + if (WANTCACHE(fctx)) { +- result = cache_message(fctx, message, query->addrinfo, ++ result = cache_message(fctx, rmessage, query->addrinfo, + now); + if (result != ISC_R_SUCCESS) { + FCTXTRACE3("cache_message complete", result); +@@ -8680,7 +8670,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + } + + /* +- * Ncache the negatively cacheable parts of the message. This may ++ * Ncache the negatively cacheable parts of the rmessage. This may + * also cause work to be queued to the DNSSEC validator. + */ + if (WANTNCACHE(fctx)) { +@@ -8689,16 +8679,16 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + /* + * Cache DS NXDOMAIN separately to other types. + */ +- if (message->rcode == dns_rcode_nxdomain && ++ if (rmessage->rcode == dns_rcode_nxdomain && + fctx->type != dns_rdatatype_ds) + covers = dns_rdatatype_any; + else + covers = fctx->type; + + /* +- * Cache any negative cache entries in the message. ++ * Cache any negative cache entries in the rmessage. + */ +- result = ncache_message(fctx, message, query->addrinfo, covers, now); ++ result = ncache_message(fctx, rmessage, query->addrinfo, covers, now); + if (result != ISC_R_SUCCESS) + FCTXTRACE3("ncache_message complete", result); + } +@@ -8726,7 +8716,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + #ifdef ENABLE_AFL + if (dns_fuzzing_resolver && (keep_trying || resend)) { + fctx_done(fctx, DNS_R_SERVFAIL, __LINE__); +- return; ++ goto detach_rmessage; + } else + #endif + if (keep_trying) { +@@ -8737,7 +8727,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + * Add this server to the list of bad servers for + * this fctx.
+ */ +- add_bad(fctx, message, addrinfo, ++ add_bad(fctx, rmessage, addrinfo, + broken_server, broken_type); + } + +@@ -8746,7 +8736,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + fname = dns_fixedname_initname(&foundname); + if (result != ISC_R_SUCCESS) { + fctx_done(fctx, DNS_R_SERVFAIL, __LINE__); +- return; ++ goto detach_rmessage; + } + findoptions = 0; + if (dns_rdatatype_atparent(fctx->type)) +@@ -8764,7 +8754,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + if (result != ISC_R_SUCCESS) { + FCTXTRACE("couldn't find a zonecut"); + fctx_done(fctx, DNS_R_SERVFAIL, __LINE__); +- return; ++ goto detach_rmessage; + } + if (!dns_name_issubdomain(fname, &fctx->domain)) { + /* +@@ -8773,7 +8763,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + */ + FCTXTRACE("nameservers now above QDOMAIN"); + fctx_done(fctx, DNS_R_SERVFAIL, __LINE__); +- return; ++ goto detach_rmessage; + } + + fcount_decr(fctx); +@@ -8782,12 +8772,12 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + result = dns_name_dup(fname, fctx->mctx, &fctx->domain); + if (result != ISC_R_SUCCESS) { + fctx_done(fctx, DNS_R_SERVFAIL, __LINE__); +- return; ++ goto detach_rmessage; + } + result = fcount_incr(fctx, true); + if (result != ISC_R_SUCCESS) { + fctx_done(fctx, DNS_R_SERVFAIL, __LINE__); +- return; ++ goto detach_rmessage; + } + fctx->ns_ttl = fctx->nameservers.ttl; + fctx->ns_ttl_ok = true; +@@ -8844,7 +8834,7 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + fctx_done(fctx, result, __LINE__); + } else if (result == DNS_R_CHASEDSSERVERS) { + unsigned int n; +- add_bad(fctx, query->rmessage, addrinfo, result, broken_type); ++ add_bad(fctx, rmessage, addrinfo, result, broken_type); + fctx_cancelqueries(fctx, true, false); + fctx_cleanupfinds(fctx); + fctx_cleanupforwaddrs(fctx); +@@ -8875,6 +8865,8 @@ resquery_response(isc_task_t *task, isc_event_t *event) { + */ + fctx_done(fctx, result, __LINE__); + } ++detach_rmessage: ++ dns_message_detach(&rmessage); + } + + /*** +-- +2.23.0 diff --git a/backport-CVE-2021-25220.patch b/backport-CVE-2021-25220.patch index f4ea94b..0755072 100644 --- a/backport-CVE-2021-25220.patch +++ b/backport-CVE-2021-25220.patch @@ -165,7 +165,7 @@ index 32cdbb9..153825c 100644 check_section(void *arg, dns_name_t *addname, dns_rdatatype_t type, dns_section_t section) @@ -6252,7 +6367,7 @@ check_section(void *arg, dns_name_t *addname, dns_rdatatype_t type, - result = dns_message_findname(fctx->rmessage, section, addname, + result = dns_message_findname(rmessage, section, addname, dns_rdatatype_any, 0, &name, NULL); if (result == ISC_R_SUCCESS) { - external = !dns_name_issubdomain(name, &fctx->domain); diff --git a/bind.spec b/bind.spec index 4dced61..7bc6b89 100644 --- a/bind.spec +++ b/bind.spec @@ -19,7 +19,7 @@ Name: bind Summary: Domain Name System (DNS) Server (named) License: MPLv2.0 Version: 9.11.21 -Release: 12 +Release: 13 Epoch: 32 Url: http://www.isc.org/products/BIND/ Source0: https://ftp.isc.org/isc/bind9/9.11.21/bind-%{version}.tar.gz @@ -172,6 +172,71 @@ Patch12: bind-9.10-sdb.patch # needs inpection Patch13: bind-9.3.2b1-fix_sdb_ldap.patch +Patch6000: backport-0000-Fix-nxdomain-redirect-assertion-failure.patch +Patch6001: backport-0001-Add-test-for-nxdomain-redirect-ncachenxdomain.patch +Patch6002: backport-0002-make-sure-new_zone_lock-is-locked-before-unlocking-i.patch +Patch6003: backport-0003-Prevent-crash-on-dst-initialization-failure.patch +Patch6004: 
backport-0004-IPSECKEY-require-non-zero-length-public-keys.patch +Patch6005: backport-0005-NSEC3PARAM-check-that-saltlen-is-consistent-with-the.patch +Patch6006: backport-0006-A6-return-FORMERR-in-fromwire-if-bits-are-non-zero.patch +Patch6007: backport-0007-Cast-the-original-rcode-to-dns_ttl_t-when-setting-ex.patch +Patch6008: backport-0008-Lock-on-msg-SELECT_POKE_CLOSE-as-it-triggers-a-tsan-.patch +Patch6009: backport-0009-Lock-access-when-updating-reading-manager-epoll_even.patch +Patch6010: backport-0010-Take-complete-ownership-of-aclp-before-calling-destr.patch +Patch6011: backport-0011-Take-complete-ownership-of-validatorp-before-calling.patch +Patch6012: backport-0012-Address-lock-order-inversion.patch +Patch6013: backport-0013-It-appears-that-you-can-t-change-what-you-are-pollin.patch +Patch6014: backport-0014-counter-used-was-read-without-the-lock-being-held.patch +Patch6015: backport-0015-Missing-locks-in-ns_lwresd_shutdown.patch +Patch6016: backport-0016-Use-atomics-to-update-counters.patch +Patch6017: backport-0017-Obtain-a-lock-on-the-quota-structure.patch +Patch6018: backport-0018-The-node-lock-was-released-too-early.patch +Patch6019: backport-0019-Address-lock-order-inversion-between-the-keytable-an.patch +Patch6020: backport-0020-Pause-dbiterator-to-release-rwlock-to-prevent-lock-o.patch +Patch6021: backport-0021-Address-lock-order-reversals-when-shutting-down-a-vi.patch +Patch6022: backport-0022-Hold-qid-lock-when-calling-deref_portentry-as.patch +Patch6023: backport-0023-Lock-zone-before-calling-zone_namerd_tostr.patch +Patch6024: backport-0024-Address-TSAN-error-between-dns_rbt_findnode-and-subt.patch +Patch6025: backport-0025-Address-data-race-in-dns_stats_detach-over-reference.patch +Patch6026: backport-0026-Lock-check-of-DNS_ZONEFLG_EXITING-flag.patch +Patch6027: backport-0027-Fix-locking-for-LMDB-0.9.26.patch +Patch6028: backport-0028-Correctly-encode-LOC-records-with-non-integer-negati.patch +Patch6029: backport-0029-isc_ratelimiter-needs-to-hold-a-reference-to-its-tas.patch +Patch6030: backport-0030-Lock-access-to-flags-in-dns__zone_loadpending.patch +Patch6031: backport-0031-Update-init_count-atomically-to-silence-tsan-errors.patch +Patch6032: backport-0032-dig-bufsize-0-failed-to-disable-EDNS-as-a-side-effec.patch +Patch6033: backport-0033-Remove-optimisation-on-obtaining-a-headlock-as-it-tr.patch +Patch6034: backport-0034-Address-tsan-error-in-view-destroy.patch +Patch6035: backport-0035-Lock-access-to-ctx-blocked-as-it-is-updated-by-multi.patch +Patch6036: backport-0036-Only-test-node-data-if-we-care-about-whether-data-is.patch +Patch6037: backport-0037-Test-if-linked-while-holding-the-queue-lock.patch +Patch6038: backport-0038-Address-data-race-in-dns_adbentry_overquota.patch +Patch6039: backport-0039-Address-lock-order-inversion.patch +Patch6040: backport-0040-Prevent-loads_pending-going-to-zero-while-kicking-th.patch +Patch6041: backport-0041-Address-data-races-between-socket-bitfields.patch +Patch6042: backport-0042-Only-read-dns_master_indent-and-dns_master_indentstr.patch +Patch6043: backport-0043-Defer-read-of-zl-server-and-zl-reconfig-until.patch +Patch6044: backport-0044-Use-a-reference-counter-for-zt.patch +Patch6045: backport-0045-Pause-dbiterator-to-release-rwlock-to-prevent-lock-o.patch +Patch6046: backport-0046-Pause-dbiterator-to-release-rwlock-to-prevent-lock-o.patch +Patch6047: backport-0047-Pause-dbiterator-to-release-rwlock-to-prevent-lock-o.patch +Patch6048: backport-0048-Pause-dbiterator-ealier-to-prevent-lock-order-invers.patch +Patch6049: 
backport-0049-Lock-access-to-control-symtab-to-prevent-data-race.patch +Patch6050: backport-0050-Address-lock-order-inversion.patch +Patch6051: backport-0051-Break-lock-order-loop-by-sending-TAT-in-an-event.patch +Patch6052: backport-0052-Handle-DNS_R_NCACHENXRRSET-in-fetch_callback_-dnskey.patch +Patch6053: backport-0053-Lock-read-of-refs-when-atomics-are-not-available.patch +Patch6054: backport-0054-Inactive-incorrectly-incremented.patch +Patch6055: backport-0055-Resolve-TSAN-data-race-in-zone_maintenance.patch +Patch6056: backport-0056-Free-resources-when-gss_accept_sec_context-fails.patch +Patch6057: backport-0057-Unload-a-zone-if-a-transfer-breaks-its-SOA-record.patch +Patch6058: backport-0058-Address-inconsistencies-in-checking-added-RRsets.patch +Patch6059: backport-0059-dns_rdata_tostruct-should-reject-rdata-with-DNS_RDAT.patch +Patch6060: backport-0060-Update-init_count-atomically-to-silence-tsan-errors.patch +Patch6061: backport-0061-Refactored-dns_message_t-for-using-attach-detach-sem.patch +Patch6062: backport-0062-Fix-invalid-dns-message-state-in-resolver-s-logic.patch +Patch6063: backport-0063-Properly-handling-dns_message_t-shared-references.patch + %description Berkeley Internet Name Domain (BIND) is an implementation of the Domain Name System (DNS) protocols and provides an openly redistributable reference @@ -368,13 +433,86 @@ are used for building ISC DHCP. %patch193 -p1 %patch194 -p1 %patch195 -p1 + +mkdir lib/dns/tests/testdata/dstrandom +cp -a %{SOURCE29} lib/dns/tests/testdata/dstrandom/random.data + +%patch133 -p1 -b .rh640538 +%patch134 -p1 -b .rh669163 + +%patch6000 -p1 +%patch6001 -p1 +%patch6027 -p1 +%patch6002 -p1 +%patch6003 -p1 +%patch6004 -p1 +%patch6005 -p1 +%patch6006 -p1 +%patch6007 -p1 +%patch6008 -p1 +%patch6009 -p1 +%patch6010 -p1 +%patch6011 -p1 +%patch6012 -p1 +%patch6013 -p1 +%patch6014 -p1 +%patch6015 -p1 +%patch6016 -p1 +%patch6017 -p1 +%patch6018 -p1 +%patch6019 -p1 +%patch6020 -p1 +%patch6021 -p1 +%patch6022 -p1 +%patch6023 -p1 +%patch6024 -p1 +%patch6025 -p1 +%patch6026 -p1 + %patch196 -p1 %patch197 -p1 + +%patch6028 -p1 +%patch6029 -p1 +%patch6030 -p1 +%patch6031 -p1 +%patch6032 -p1 +%patch6033 -p1 +%patch6034 -p1 +%patch6035 -p1 +%patch6036 -p1 +%patch6037 -p1 +%patch6038 -p1 +%patch6039 -p1 +%patch6040 -p1 +%patch6041 -p1 +%patch6042 -p1 +%patch6043 -p1 +%patch6044 -p1 +%patch6045 -p1 +%patch6046 -p1 +%patch6047 -p1 +%patch6048 -p1 +%patch6049 -p1 +%patch6050 -p1 +%patch6051 -p1 +%patch6052 -p1 +%patch6053 -p1 +%patch6054 -p1 +%patch6055 -p1 +%patch6056 -p1 +%patch6057 -p1 +%patch6058 -p1 +%patch6059 -p1 +%patch6060 -p1 + %patch198 -p1 -%patch199 -p1 -mkdir lib/dns/tests/testdata/dstrandom -cp -a %{SOURCE29} lib/dns/tests/testdata/dstrandom/random.data +%patch6061 -p1 +%patch6062 -p1 +%patch6063 -p1 + +%patch199 -p1 %if %{with PKCS11} cp -r bin/named{,-pkcs11} @@ -415,9 +553,6 @@ cp -fp contrib/sdb/sqlite/zone2sqlite.c bin/sdb_tools %patch137 -p1 -b .strlcat_fix %endif -%patch133 -p1 -b .rh640538 -%patch134 -p1 -b .rh669163 - # Sparc and s390 arches need to use -fPIE %ifarch sparcv9 sparc64 s390 s390x for i in bin/named{,-sdb}/{,unix}/Makefile.in; do @@ -479,7 +614,7 @@ export LIBDIR_SUFFIXi= %endif %if %{with JSON} --with-libjson \ -%endif +%endif %if %{with DNSTAP} --enable-dnstap \ %endif @@ -1160,6 +1295,12 @@ rm -rf ${RPM_BUILD_ROOT} %changelog +* Sat Sep 03 2022 jiangheng - 32:9.11.21-13 +- Type:bugfix +- ID:NA +- SUG:NA +- DESC:backport some patches from community + * Fri Sep 02 2022 jiangheng - 32:9.11.21-12 - Type:bugfix - ID:NA -- 
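Note on the attach/detach discipline used throughout patches 0061-0063: the resolver stops copying raw dns_message_t pointers and instead takes a counted reference with dns_message_attach() wherever a holder (query, valarg, resquery_response itself) may outlive the original owner, and drops it with dns_message_detach() when done, so the object is freed only when the last holder detaches. The short, self-contained C sketch below is illustrative only, not BIND code: msg_t, msg_attach(), msg_detach() and valarg_t are invented stand-ins for dns_message_t, dns_message_attach(), dns_message_detach() and the validator argument, and the real code additionally uses BIND's memory context and atomic reference counting.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for dns_message_t: a reference-counted object. */
typedef struct msg {
	unsigned int refs;
} msg_t;

static msg_t *
msg_create(void) {
	msg_t *m = malloc(sizeof(*m));
	if (m == NULL)
		abort();
	m->refs = 1;		/* the creator holds the first reference */
	return m;
}

/* Like dns_message_attach(): copy the pointer and bump the counter. */
static void
msg_attach(msg_t *source, msg_t **targetp) {
	assert(*targetp == NULL);
	source->refs++;
	*targetp = source;
}

/* Like dns_message_detach(): drop one reference, free on the last one. */
static void
msg_detach(msg_t **msgp) {
	msg_t *m = *msgp;
	*msgp = NULL;
	if (--m->refs == 0) {
		printf("last reference dropped, freeing message\n");
		free(m);
	}
}

/* An asynchronous consumer (think: the validator's valarg) keeps its
 * own reference, so the message outlives the query that created it. */
typedef struct valarg {
	msg_t *rmessage;
} valarg_t;

int
main(void) {
	msg_t *query_rmessage = msg_create();	/* owned by the "query" */
	valarg_t valarg = { .rmessage = NULL };

	msg_attach(query_rmessage, &valarg.rmessage);	/* as in valcreate() */
	msg_detach(&query_rmessage);	/* query goes away, as in fctx_cancelquery() */

	/* The consumer's reference is still valid here. */
	printf("consumer still holds a live message (refs=%u)\n",
	       valarg.rmessage->refs);
	msg_detach(&valarg.rmessage);	/* as at the end of validated() */
	return 0;
}

The same reasoning explains why resquery_response() in patch 0063 attaches its own reference on entry and detaches it at the detach_rmessage label on every exit path: any intermediate call that cancels the query may drop the query's reference, but the function can still safely use the message it already holds.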