diff --git a/0001-CVE-2020-27781-1.patch b/0001-CVE-2020-27781-1.patch deleted file mode 100644 index 12f4db54803459a273799c47885df741fd48dff1..0000000000000000000000000000000000000000 --- a/0001-CVE-2020-27781-1.patch +++ /dev/null @@ -1,48 +0,0 @@ -From 5dbc6bf0a67183bff7d7ca48ccd90ebbce492408 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?=C4=90=E1=BA=B7ng=20Minh=20D=C5=A9ng?= -Date: Sun, 10 May 2020 11:37:23 +0700 -Subject: [PATCH 1/5] pybind/ceph_volume_client: Fix PEP-8 SyntaxWarning -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Signed-off-by: Đặng Minh Dũng -(cherry picked from commit 3ce9a89a5a1a2d7fa3d57c597b781a6aece7cbb5) ---- - src/pybind/ceph_volume_client.py | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/src/pybind/ceph_volume_client.py b/src/pybind/ceph_volume_client.py -index 7d7e5b49e40..25cd6b91ae2 100644 ---- a/src/pybind/ceph_volume_client.py -+++ b/src/pybind/ceph_volume_client.py -@@ -355,7 +355,7 @@ class CephFSVolumeClient(object): - continue - - (group_id, volume_id) = volume.split('/') -- group_id = group_id if group_id is not 'None' else None -+ group_id = group_id if group_id != 'None' else None - volume_path = VolumePath(group_id, volume_id) - access_level = volume_data['access_level'] - -@@ -378,7 +378,7 @@ class CephFSVolumeClient(object): - if vol_meta['auths'][auth_id] == want_auth: - continue - -- readonly = True if access_level is 'r' else False -+ readonly = access_level == 'r' - self._authorize_volume(volume_path, auth_id, readonly) - - # Recovered from partial auth updates for the auth ID's access -@@ -1120,7 +1120,7 @@ class CephFSVolumeClient(object): - - # Construct auth caps that if present might conflict with the desired - # auth caps. -- unwanted_access_level = 'r' if want_access_level is 'rw' else 'rw' -+ unwanted_access_level = 'r' if want_access_level == 'rw' else 'rw' - unwanted_mds_cap = 'allow {0} path={1}'.format(unwanted_access_level, path) - if namespace: - unwanted_osd_cap = 'allow {0} pool={1} namespace={2}'.format( --- -2.23.0 - diff --git a/0001-cmake-detect-and-use-sigdescr_np-if-available.patch b/0001-cmake-detect-and-use-sigdescr_np-if-available.patch deleted file mode 100644 index dee16759d50e0fbf772afb25fa4445252f01f716..0000000000000000000000000000000000000000 --- a/0001-cmake-detect-and-use-sigdescr_np-if-available.patch +++ /dev/null @@ -1,73 +0,0 @@ -From 9b34ba1777972808ba2af0073c967dece6c70626 Mon Sep 17 00:00:00 2001 -From: David Disseldorp -Date: Tue, 1 Sep 2020 13:49:21 +0200 -Subject: [PATCH] cmake: detect and use sigdescr_np() if available - -sys_siglist is deprecated with glibc 2.32. A new thread-safe and -async-signal safe sigdescr_np() function is provided, so use it if -available. 
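A note on the `is`/`==` fix in the first deleted patch above: `is` tests object identity, not equality, so comparing a variable against a string literal with `is` depends on CPython's string-interning behavior and is flagged with a SyntaxWarning since Python 3.8. A minimal sketch of the pitfall (hypothetical values, not code from the patch):

```python
group_id = "".join(["No", "ne"])  # equal to 'None' but a distinct object

print(group_id == 'None')   # True  -- value comparison, always reliable
print(group_id is 'None')   # False -- identity comparison; also emits
                            # SyntaxWarning on Python >= 3.8

# The corrected form used by the patch:
group_id = group_id if group_id != 'None' else None
print(group_id)             # None
```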
- -Fixes: https://tracker.ceph.com/issues/47187 -Signed-off-by: David Disseldorp -(cherry picked from commit b9b6faf66ae67648626470cb4fc3f0850ac4d842) - -Conflicts: - CMakeLists.txt - cmake/modules/CephChecks.cmake -- CephChecks.cmake file does not exist in nautilus; manually cherry-picked the - change in that file to top-level CMakeLists.txt ---- - CMakeLists.txt | 1 + - src/global/signal_handler.h | 8 +++++--- - src/include/config-h.in.cmake | 3 +++ - 3 files changed, 9 insertions(+), 3 deletions(-) - -diff --git a/CMakeLists.txt b/CMakeLists.txt -index 5b7a67bec60..bdeea6f9c7d 100644 ---- a/CMakeLists.txt -+++ b/CMakeLists.txt -@@ -105,6 +105,7 @@ CHECK_FUNCTION_EXISTS(strerror_r HAVE_Strerror_R) - CHECK_FUNCTION_EXISTS(name_to_handle_at HAVE_NAME_TO_HANDLE_AT) - CHECK_FUNCTION_EXISTS(pipe2 HAVE_PIPE2) - CHECK_FUNCTION_EXISTS(accept4 HAVE_ACCEPT4) -+CHECK_FUNCTION_EXISTS(sigdescr_np HAVE_SIGDESCR_NP) - - include(CMakePushCheckState) - cmake_push_check_state(RESET) -diff --git a/src/global/signal_handler.h b/src/global/signal_handler.h -index 476724201aa..c101b2e2873 100644 ---- a/src/global/signal_handler.h -+++ b/src/global/signal_handler.h -@@ -20,10 +20,12 @@ - - typedef void (*signal_handler_t)(int); - --#ifndef HAVE_REENTRANT_STRSIGNAL --# define sig_str(signum) sys_siglist[signum] --#else -+#ifdef HAVE_SIGDESCR_NP -+# define sig_str(signum) sigdescr_np(signum) -+#elif HAVE_REENTRANT_STRSIGNAL - # define sig_str(signum) strsignal(signum) -+#else -+# define sig_str(signum) sys_siglist[signum] - #endif - - void install_sighandler(int signum, signal_handler_t handler, int flags); -diff --git a/src/include/config-h.in.cmake b/src/include/config-h.in.cmake -index ccce8fe0017..acced696e36 100644 ---- a/src/include/config-h.in.cmake -+++ b/src/include/config-h.in.cmake -@@ -235,6 +235,9 @@ - /* Define to 1 if you have sched.h. */ - #cmakedefine HAVE_SCHED 1 - -+/* Define to 1 if you have sigdescr_np. */ -+#cmakedefine HAVE_SIGDESCR_NP 1 -+ - /* Support SSE (Streaming SIMD Extensions) instructions */ - #cmakedefine HAVE_SSE - --- -2.23.0 - diff --git a/0002-CVE-2020-27781-2.patch b/0002-CVE-2020-27781-2.patch deleted file mode 100644 index 8f7cf63d26bc42a2df92ce392650f646ee6aee0a..0000000000000000000000000000000000000000 --- a/0002-CVE-2020-27781-2.patch +++ /dev/null @@ -1,172 +0,0 @@ -From ab18393db0b34506c3fd11346b6d0f1b781b9d99 Mon Sep 17 00:00:00 2001 -From: Ramana Raja -Date: Wed, 25 Nov 2020 16:44:35 +0530 -Subject: [PATCH 2/5] pybind/ceph_volume_client: Disallow authorize auth_id - -This patch disallow the ceph_volume_client to authorize the auth_id -which is not created by ceph_volume_client. Those auth_ids could be -created by other means for other use cases which should not be modified -by ceph_volume_client. 
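Reduced to its essentials, the guard this patch adds works as sketched below. This is a condensed, hypothetical rendering of the hunks that follow: `_rados_command`, `rados.Error`, and `CephFSVolumeClientError` are the names the patch actually uses, but the standalone function shape and its name are illustrative only.

```python
import rados  # Python bindings shipped with Ceph
from ceph_volume_client import CephFSVolumeClientError

def authorize_guarded(client, auth_id, auth_meta):
    """Refuse to (re)authorize an auth_id that exists in Ceph but has no
    volume-client metadata, i.e. one created by other means."""
    try:
        existing_caps = client._rados_command(
            'auth get', {'entity': "client.{0}".format(auth_id)})
    # FIXME (noted upstream): rados raises Error instead of
    # ObjectNotFound when 'auth get' fails
    except rados.Error:
        existing_caps = None

    if auth_meta is None and existing_caps is not None:
        # auth_id exists but was not created by ceph_volume_client
        msg = ("auth ID: {0} exists and not created by "
               "ceph_volume_client. Not allowed to modify".format(auth_id))
        raise CephFSVolumeClientError(msg)
    return existing_caps
```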
- -Fixes: https://tracker.ceph.com/issues/48555 -Signed-off-by: Ramana Raja -Signed-off-by: Kotresh HR -(cherry picked from commit 3a85d2d04028a323952a31d18cdbefb710be2e2b) ---- - src/pybind/ceph_volume_client.py | 63 ++++++++++++++++++++------------ - 1 file changed, 39 insertions(+), 24 deletions(-) - -diff --git a/src/pybind/ceph_volume_client.py b/src/pybind/ceph_volume_client.py -index 25cd6b91ae2..e2ab64ee226 100644 ---- a/src/pybind/ceph_volume_client.py -+++ b/src/pybind/ceph_volume_client.py -@@ -215,6 +215,7 @@ CEPHFSVOLUMECLIENT_VERSION_HISTORY = """ - * 2 - Added get_object, put_object, delete_object methods to CephFSVolumeClient - * 3 - Allow volumes to be created without RADOS namespace isolation - * 4 - Added get_object_and_version, put_object_versioned method to CephFSVolumeClient -+ * 5 - Disallow authorize API for users not created by CephFSVolumeClient - """ - - -@@ -238,7 +239,7 @@ class CephFSVolumeClient(object): - """ - - # Current version -- version = 4 -+ version = 5 - - # Where shall we create our volumes? - POOL_PREFIX = "fsvolume_" -@@ -379,7 +380,18 @@ class CephFSVolumeClient(object): - continue - - readonly = access_level == 'r' -- self._authorize_volume(volume_path, auth_id, readonly) -+ client_entity = "client.{0}".format(auth_id) -+ try: -+ existing_caps = self._rados_command( -+ 'auth get', -+ { -+ 'entity': client_entity -+ } -+ ) -+ # FIXME: rados raising Error instead of ObjectNotFound in auth get failure -+ except rados.Error: -+ existing_caps = None -+ self._authorize_volume(volume_path, auth_id, readonly, existing_caps) - - # Recovered from partial auth updates for the auth ID's access - # to a volume. -@@ -975,6 +987,18 @@ class CephFSVolumeClient(object): - """ - - with self._auth_lock(auth_id): -+ client_entity = "client.{0}".format(auth_id) -+ try: -+ existing_caps = self._rados_command( -+ 'auth get', -+ { -+ 'entity': client_entity -+ } -+ ) -+ # FIXME: rados raising Error instead of ObjectNotFound in auth get failure -+ except rados.Error: -+ existing_caps = None -+ - # Existing meta, or None, to be updated - auth_meta = self._auth_metadata_get(auth_id) - -@@ -988,7 +1012,14 @@ class CephFSVolumeClient(object): - 'dirty': True, - } - } -+ - if auth_meta is None: -+ if existing_caps is not None: -+ msg = "auth ID: {0} exists and not created by ceph_volume_client. Not allowed to modify".format(auth_id) -+ log.error(msg) -+ raise CephFSVolumeClientError(msg) -+ -+ # non-existent auth IDs - sys.stderr.write("Creating meta for ID {0} with tenant {1}\n".format( - auth_id, tenant_id - )) -@@ -998,14 +1029,6 @@ class CephFSVolumeClient(object): - 'tenant_id': tenant_id.__str__() if tenant_id else None, - 'volumes': volume - } -- -- # Note: this is *not* guaranteeing that the key doesn't already -- # exist in Ceph: we are allowing VolumeClient tenants to -- # 'claim' existing Ceph keys. In order to prevent VolumeClient -- # tenants from reading e.g. client.admin keys, you need to -- # have configured your VolumeClient user (e.g. Manila) to -- # have mon auth caps that prevent it from accessing those keys -- # (e.g. 
limit it to only access keys with a manila.* prefix) - else: - # Disallow tenants to share auth IDs - if auth_meta['tenant_id'].__str__() != tenant_id.__str__(): -@@ -1025,7 +1048,7 @@ class CephFSVolumeClient(object): - self._auth_metadata_set(auth_id, auth_meta) - - with self._volume_lock(volume_path): -- key = self._authorize_volume(volume_path, auth_id, readonly) -+ key = self._authorize_volume(volume_path, auth_id, readonly, existing_caps) - - auth_meta['dirty'] = False - auth_meta['volumes'][volume_path_str]['dirty'] = False -@@ -1042,7 +1065,7 @@ class CephFSVolumeClient(object): - 'auth_key': None - } - -- def _authorize_volume(self, volume_path, auth_id, readonly): -+ def _authorize_volume(self, volume_path, auth_id, readonly, existing_caps): - vol_meta = self._volume_metadata_get(volume_path) - - access_level = 'r' if readonly else 'rw' -@@ -1061,14 +1084,14 @@ class CephFSVolumeClient(object): - vol_meta['auths'].update(auth) - self._volume_metadata_set(volume_path, vol_meta) - -- key = self._authorize_ceph(volume_path, auth_id, readonly) -+ key = self._authorize_ceph(volume_path, auth_id, readonly, existing_caps) - - vol_meta['auths'][auth_id]['dirty'] = False - self._volume_metadata_set(volume_path, vol_meta) - - return key - -- def _authorize_ceph(self, volume_path, auth_id, readonly): -+ def _authorize_ceph(self, volume_path, auth_id, readonly, existing_caps): - path = self._get_path(volume_path) - log.debug("Authorizing Ceph id '{0}' for path '{1}'".format( - auth_id, path -@@ -1096,15 +1119,7 @@ class CephFSVolumeClient(object): - want_osd_cap = 'allow {0} pool={1}'.format(want_access_level, - pool_name) - -- try: -- existing = self._rados_command( -- 'auth get', -- { -- 'entity': client_entity -- } -- ) -- # FIXME: rados raising Error instead of ObjectNotFound in auth get failure -- except rados.Error: -+ if existing_caps is None: - caps = self._rados_command( - 'auth get-or-create', - { -@@ -1116,7 +1131,7 @@ class CephFSVolumeClient(object): - }) - else: - # entity exists, update it -- cap = existing[0] -+ cap = existing_caps[0] - - # Construct auth caps that if present might conflict with the desired - # auth caps. --- -2.23.0 - diff --git a/0003-CVE-2020-27781-3.patch b/0003-CVE-2020-27781-3.patch deleted file mode 100644 index 5963c3447c1965978db67a76340b81377c09c8cb..0000000000000000000000000000000000000000 --- a/0003-CVE-2020-27781-3.patch +++ /dev/null @@ -1,113 +0,0 @@ -From 621fea6fda4f06876295f67d4767914332ff82d3 Mon Sep 17 00:00:00 2001 -From: Kotresh HR -Date: Thu, 26 Nov 2020 14:48:16 +0530 -Subject: [PATCH 3/5] pybind/ceph_volume_client: Preserve existing caps while - authorize/deauthorize auth-id - -Authorize/Deauthorize used to overwrite the caps of auth-id which would -end up deleting existing caps. This patch fixes the same by retaining -the existing caps by appending or deleting the new caps as needed. 
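The retention rule is easiest to see in isolation. Below is a runnable, standalone rendering of the `_prepare_updated_caps_list` helper added in the hunk that follows; the sample cap dict is hypothetical data, and the flat alternating key/value result is the form the 'auth caps' mon command expects.

```python
def prepare_updated_caps_list(existing, mds_cap_str, osd_cap_str, authorize=True):
    """Merge new mds/osd caps into an entity's existing caps."""
    caps_list = []
    for k, v in existing['caps'].items():
        if k in ('mds', 'osd'):
            continue  # always replaced with the newly computed cap strings
        if k == 'mon' and not authorize and v == 'allow r':
            continue  # drop the default mon cap when deauthorizing
        caps_list.extend((k, v))  # preserve caps set out of band, e.g. mgr
    if mds_cap_str:
        caps_list.extend(('mds', mds_cap_str))
    if osd_cap_str:
        caps_list.extend(('osd', osd_cap_str))
    if authorize and 'mon' not in caps_list:
        caps_list.extend(('mon', 'allow r'))
    return caps_list

# An out-of-band 'mgr' cap now survives re-authorization:
existing = {'caps': {'mgr': 'allow *', 'mon': 'allow r'}}  # sample data
print(prepare_updated_caps_list(existing,
                                'allow rw path=/volumes/g/v',
                                'allow rw pool=data'))
# ['mgr', 'allow *', 'mon', 'allow r',
#  'mds', 'allow rw path=/volumes/g/v', 'osd', 'allow rw pool=data']
```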
- -Fixes: https://tracker.ceph.com/issues/48555 -Signed-off-by: Kotresh HR -(cherry picked from commit 47100e528ef77e7e82dc9877424243dc6a7e7533) ---- - src/pybind/ceph_volume_client.py | 43 ++++++++++++++++++++++---------- - 1 file changed, 30 insertions(+), 13 deletions(-) - -diff --git a/src/pybind/ceph_volume_client.py b/src/pybind/ceph_volume_client.py -index e2ab64ee226..ca1f361d03c 100644 ---- a/src/pybind/ceph_volume_client.py -+++ b/src/pybind/ceph_volume_client.py -@@ -973,6 +973,26 @@ class CephFSVolumeClient(object): - data['version'] = self.version - return self._metadata_set(self._volume_metadata_path(volume_path), data) - -+ def _prepare_updated_caps_list(self, existing_caps, mds_cap_str, osd_cap_str, authorize=True): -+ caps_list = [] -+ for k, v in existing_caps['caps'].items(): -+ if k == 'mds' or k == 'osd': -+ continue -+ elif k == 'mon': -+ if not authorize and v == 'allow r': -+ continue -+ caps_list.extend((k,v)) -+ -+ if mds_cap_str: -+ caps_list.extend(('mds', mds_cap_str)) -+ if osd_cap_str: -+ caps_list.extend(('osd', osd_cap_str)) -+ -+ if authorize and 'mon' not in caps_list: -+ caps_list.extend(('mon', 'allow r')) -+ -+ return caps_list -+ - def authorize(self, volume_path, auth_id, readonly=False, tenant_id=None): - """ - Get-or-create a Ceph auth identity for `auth_id` and grant them access -@@ -1151,8 +1171,8 @@ class CephFSVolumeClient(object): - if not orig_mds_caps: - return want_mds_cap, want_osd_cap - -- mds_cap_tokens = orig_mds_caps.split(",") -- osd_cap_tokens = orig_osd_caps.split(",") -+ mds_cap_tokens = [x.strip() for x in orig_mds_caps.split(",")] -+ osd_cap_tokens = [x.strip() for x in orig_osd_caps.split(",")] - - if want_mds_cap in mds_cap_tokens: - return orig_mds_caps, orig_osd_caps -@@ -1173,15 +1193,14 @@ class CephFSVolumeClient(object): - orig_mds_caps, orig_osd_caps, want_mds_cap, want_osd_cap, - unwanted_mds_cap, unwanted_osd_cap) - -+ caps_list = self._prepare_updated_caps_list(cap, mds_cap_str, osd_cap_str) - caps = self._rados_command( - 'auth caps', - { - 'entity': client_entity, -- 'caps': [ -- 'mds', mds_cap_str, -- 'osd', osd_cap_str, -- 'mon', cap['caps'].get('mon', 'allow r')] -+ 'caps': caps_list - }) -+ - caps = self._rados_command( - 'auth get', - { -@@ -1306,8 +1325,8 @@ class CephFSVolumeClient(object): - ) - - def cap_remove(orig_mds_caps, orig_osd_caps, want_mds_caps, want_osd_caps): -- mds_cap_tokens = orig_mds_caps.split(",") -- osd_cap_tokens = orig_osd_caps.split(",") -+ mds_cap_tokens = [x.strip() for x in orig_mds_caps.split(",")] -+ osd_cap_tokens = [x.strip() for x in orig_osd_caps.split(",")] - - for want_mds_cap, want_osd_cap in zip(want_mds_caps, want_osd_caps): - if want_mds_cap in mds_cap_tokens: -@@ -1323,17 +1342,15 @@ class CephFSVolumeClient(object): - mds_cap_str, osd_cap_str = cap_remove(orig_mds_caps, orig_osd_caps, - want_mds_caps, want_osd_caps) - -- if not mds_cap_str: -+ caps_list = self._prepare_updated_caps_list(cap, mds_cap_str, osd_cap_str, authorize=False) -+ if not caps_list: - self._rados_command('auth del', {'entity': client_entity}, decode=False) - else: - self._rados_command( - 'auth caps', - { - 'entity': client_entity, -- 'caps': [ -- 'mds', mds_cap_str, -- 'osd', osd_cap_str, -- 'mon', cap['caps'].get('mon', 'allow r')] -+ 'caps': caps_list - }) - - # FIXME: rados raising Error instead of ObjectNotFound in auth get failure --- -2.23.0 - diff --git a/0004-CVE-2020-27781-4.patch b/0004-CVE-2020-27781-4.patch deleted file mode 100644 index 
069efe6947d4609f553198a9e2ae0cee8f9c0e1e..0000000000000000000000000000000000000000 --- a/0004-CVE-2020-27781-4.patch +++ /dev/null @@ -1,52 +0,0 @@ -From 6410f3dd63890f251414377de93cd51bfc372230 Mon Sep 17 00:00:00 2001 -From: Kotresh HR -Date: Sun, 6 Dec 2020 12:40:20 +0530 -Subject: [PATCH 4/5] pybind/ceph_volume_client: Optionally authorize existing - auth-ids - -Optionally allow authorizing auth-ids not created by ceph_volume_client -via the option 'allow_existing_id'. This can help existing deployers -of manila to disallow/allow authorization of pre-created auth IDs -via a manila driver config that sets 'allow_existing_id' to False/True. - -Fixes: https://tracker.ceph.com/issues/48555 -Signed-off-by: Kotresh HR -(cherry picked from commit 77b42496e25cbd4af2e80a064ddf26221b53733f) ---- - src/pybind/ceph_volume_client.py | 6 ++++-- - 1 file changed, 4 insertions(+), 2 deletions(-) - -diff --git a/src/pybind/ceph_volume_client.py b/src/pybind/ceph_volume_client.py -index ca1f361d03c..feeb495de00 100644 ---- a/src/pybind/ceph_volume_client.py -+++ b/src/pybind/ceph_volume_client.py -@@ -993,7 +993,7 @@ class CephFSVolumeClient(object): - - return caps_list - -- def authorize(self, volume_path, auth_id, readonly=False, tenant_id=None): -+ def authorize(self, volume_path, auth_id, readonly=False, tenant_id=None, allow_existing_id=False): - """ - Get-or-create a Ceph auth identity for `auth_id` and grant them access - to -@@ -1003,6 +1003,8 @@ class CephFSVolumeClient(object): - :param tenant_id: Optionally provide a stringizable object to - restrict any created cephx IDs to other callers - passing the same tenant ID. -+ :allow_existing_id: Optionally authorize existing auth-ids not -+ created by ceph_volume_client - :return: - """ - -@@ -1034,7 +1036,7 @@ class CephFSVolumeClient(object): - } - - if auth_meta is None: -- if existing_caps is not None: -+ if not allow_existing_id and existing_caps is not None: - msg = "auth ID: {0} exists and not created by ceph_volume_client. Not allowed to modify".format(auth_id) - log.error(msg) - raise CephFSVolumeClientError(msg) --- -2.23.0 - diff --git a/0005-CVE-2020-27781-5.patch b/0005-CVE-2020-27781-5.patch deleted file mode 100644 index f4f4cf0642c1a6eb2097e630a12d71bd403314c4..0000000000000000000000000000000000000000 --- a/0005-CVE-2020-27781-5.patch +++ /dev/null @@ -1,275 +0,0 @@ -From a18b92d39f5d4714e9a79c3c4a55049daec65290 Mon Sep 17 00:00:00 2001 -From: Kotresh HR -Date: Tue, 1 Dec 2020 16:14:17 +0530 -Subject: [PATCH 5/5] tasks/cephfs/test_volume_client: Add tests for - authorize/deauthorize - -1. Add testcase for authorizing auth_id which is not added by - ceph_volume_client -2. Add testcase to test 'allow_existing_id' option -3. 
Add testcase for deauthorizing auth_id which has got it's caps - updated out of band - -Signed-off-by: Kotresh HR -(cherry picked from commit aa4beb3d993649a696af95cf27150cc460baaf70) - -Conflicts: - qa/tasks/cephfs/test_volume_client.py ---- - qa/tasks/cephfs/test_volume_client.py | 213 +++++++++++++++++++++++++- - 1 file changed, 209 insertions(+), 4 deletions(-) - -diff --git a/qa/tasks/cephfs/test_volume_client.py b/qa/tasks/cephfs/test_volume_client.py -index 0f205ecec6e..1c37b37a0b0 100644 ---- a/qa/tasks/cephfs/test_volume_client.py -+++ b/qa/tasks/cephfs/test_volume_client.py -@@ -58,7 +58,7 @@ vc.disconnect() - def _configure_guest_auth(self, volumeclient_mount, guest_mount, - guest_entity, mount_path, - namespace_prefix=None, readonly=False, -- tenant_id=None): -+ tenant_id=None, allow_existing_id=False): - """ - Set up auth credentials for the guest client to mount a volume. - -@@ -83,14 +83,16 @@ vc.disconnect() - key = self._volume_client_python(volumeclient_mount, dedent(""" - vp = VolumePath("{group_id}", "{volume_id}") - auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly}, -- tenant_id="{tenant_id}") -+ tenant_id="{tenant_id}", -+ allow_existing_id="{allow_existing_id}") - print(auth_result['auth_key']) - """.format( - group_id=group_id, - volume_id=volume_id, - guest_entity=guest_entity, - readonly=readonly, -- tenant_id=tenant_id)), volume_prefix, namespace_prefix -+ tenant_id=tenant_id, -+ allow_existing_id=allow_existing_id)), volume_prefix, namespace_prefix - ) - - # CephFSVolumeClient's authorize() does not return the secret -@@ -858,6 +860,209 @@ vc.disconnect() - ))) - self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes")) - -+ def test_authorize_auth_id_not_created_by_ceph_volume_client(self): -+ """ -+ If the auth_id already exists and is not created by -+ ceph_volume_client, it's not allowed to authorize -+ the auth-id by default. -+ """ -+ volumeclient_mount = self.mounts[1] -+ volumeclient_mount.umount_wait() -+ -+ # Configure volumeclient_mount as the handle for driving volumeclient. -+ self._configure_vc_auth(volumeclient_mount, "manila") -+ -+ group_id = "groupid" -+ volume_id = "volumeid" -+ -+ # Create auth_id -+ out = self.fs.mon_manager.raw_cluster_cmd( -+ "auth", "get-or-create", "client.guest1", -+ "mds", "allow *", -+ "osd", "allow rw", -+ "mon", "allow *" -+ ) -+ -+ auth_id = "guest1" -+ guestclient_1 = { -+ "auth_id": auth_id, -+ "tenant_id": "tenant1", -+ } -+ -+ # Create a volume. -+ self._volume_client_python(volumeclient_mount, dedent(""" -+ vp = VolumePath("{group_id}", "{volume_id}") -+ vc.create_volume(vp, 1024*1024*10) -+ """.format( -+ group_id=group_id, -+ volume_id=volume_id, -+ ))) -+ -+ # Cannot authorize 'guestclient_1' to access the volume. 
-+ # It uses auth ID 'guest1', which already exists and not -+ # created by ceph_volume_client -+ with self.assertRaises(CommandFailedError): -+ self._volume_client_python(volumeclient_mount, dedent(""" -+ vp = VolumePath("{group_id}", "{volume_id}") -+ vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}") -+ """.format( -+ group_id=group_id, -+ volume_id=volume_id, -+ auth_id=guestclient_1["auth_id"], -+ tenant_id=guestclient_1["tenant_id"] -+ ))) -+ -+ # Delete volume -+ self._volume_client_python(volumeclient_mount, dedent(""" -+ vp = VolumePath("{group_id}", "{volume_id}") -+ vc.delete_volume(vp) -+ """.format( -+ group_id=group_id, -+ volume_id=volume_id, -+ ))) -+ -+ def test_authorize_allow_existing_id_option(self): -+ """ -+ If the auth_id already exists and is not created by -+ ceph_volume_client, it's not allowed to authorize -+ the auth-id by default but is allowed with option -+ allow_existing_id. -+ """ -+ volumeclient_mount = self.mounts[1] -+ volumeclient_mount.umount_wait() -+ -+ # Configure volumeclient_mount as the handle for driving volumeclient. -+ self._configure_vc_auth(volumeclient_mount, "manila") -+ -+ group_id = "groupid" -+ volume_id = "volumeid" -+ -+ # Create auth_id -+ out = self.fs.mon_manager.raw_cluster_cmd( -+ "auth", "get-or-create", "client.guest1", -+ "mds", "allow *", -+ "osd", "allow rw", -+ "mon", "allow *" -+ ) -+ -+ auth_id = "guest1" -+ guestclient_1 = { -+ "auth_id": auth_id, -+ "tenant_id": "tenant1", -+ } -+ -+ # Create a volume. -+ self._volume_client_python(volumeclient_mount, dedent(""" -+ vp = VolumePath("{group_id}", "{volume_id}") -+ vc.create_volume(vp, 1024*1024*10) -+ """.format( -+ group_id=group_id, -+ volume_id=volume_id, -+ ))) -+ -+ # Cannot authorize 'guestclient_1' to access the volume -+ # by default, which already exists and not created by -+ # ceph_volume_client but is allowed with option 'allow_existing_id'. -+ self._volume_client_python(volumeclient_mount, dedent(""" -+ vp = VolumePath("{group_id}", "{volume_id}") -+ vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}", -+ allow_existing_id="{allow_existing_id}") -+ """.format( -+ group_id=group_id, -+ volume_id=volume_id, -+ auth_id=guestclient_1["auth_id"], -+ tenant_id=guestclient_1["tenant_id"], -+ allow_existing_id=True -+ ))) -+ -+ # Delete volume -+ self._volume_client_python(volumeclient_mount, dedent(""" -+ vp = VolumePath("{group_id}", "{volume_id}") -+ vc.delete_volume(vp) -+ """.format( -+ group_id=group_id, -+ volume_id=volume_id, -+ ))) -+ -+ def test_deauthorize_auth_id_after_out_of_band_update(self): -+ """ -+ If the auth_id authorized by ceph_volume_client is updated -+ out of band, the auth_id should not be deleted after a -+ deauthorize. It should only remove caps associated it. -+ """ -+ volumeclient_mount = self.mounts[1] -+ volumeclient_mount.umount_wait() -+ -+ # Configure volumeclient_mount as the handle for driving volumeclient. -+ self._configure_vc_auth(volumeclient_mount, "manila") -+ -+ group_id = "groupid" -+ volume_id = "volumeid" -+ -+ -+ auth_id = "guest1" -+ guestclient_1 = { -+ "auth_id": auth_id, -+ "tenant_id": "tenant1", -+ } -+ -+ # Create a volume. -+ self._volume_client_python(volumeclient_mount, dedent(""" -+ vp = VolumePath("{group_id}", "{volume_id}") -+ vc.create_volume(vp, 1024*1024*10) -+ """.format( -+ group_id=group_id, -+ volume_id=volume_id, -+ ))) -+ -+ # Authorize 'guestclient_1' to access the volume. 
-+ self._volume_client_python(volumeclient_mount, dedent(""" -+ vp = VolumePath("{group_id}", "{volume_id}") -+ vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}") -+ """.format( -+ group_id=group_id, -+ volume_id=volume_id, -+ auth_id=guestclient_1["auth_id"], -+ tenant_id=guestclient_1["tenant_id"] -+ ))) -+ -+ # Update caps for guestclient_1 out of band -+ out = self.fs.mon_manager.raw_cluster_cmd( -+ "auth", "caps", "client.guest1", -+ "mds", "allow rw path=/volumes/groupid, allow rw path=/volumes/groupid/volumeid", -+ "osd", "allow rw pool=cephfs_data namespace=fsvolumens_volumeid", -+ "mon", "allow r", -+ "mgr", "allow *" -+ ) -+ -+ # Deauthorize guestclient_1 -+ self._volume_client_python(volumeclient_mount, dedent(""" -+ vp = VolumePath("{group_id}", "{volume_id}") -+ vc.deauthorize(vp, "{guest_entity}") -+ """.format( -+ group_id=group_id, -+ volume_id=volume_id, -+ guest_entity=guestclient_1["auth_id"] -+ ))) -+ -+ # Validate the caps of guestclient_1 after deauthorize. It should not have deleted -+ # guestclient_1. The mgr and mds caps should be present which was updated out of band. -+ out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty")) -+ -+ self.assertEqual("client.guest1", out[0]["entity"]) -+ self.assertEqual("allow rw path=/volumes/groupid", out[0]["caps"]["mds"]) -+ self.assertEqual("allow *", out[0]["caps"]["mgr"]) -+ self.assertNotIn("osd", out[0]["caps"]) -+ -+ # Delete volume -+ self._volume_client_python(volumeclient_mount, dedent(""" -+ vp = VolumePath("{group_id}", "{volume_id}") -+ vc.delete_volume(vp) -+ """.format( -+ group_id=group_id, -+ volume_id=volume_id, -+ ))) -+ - def test_recover_metadata(self): - """ - That volume client can recover from partial auth updates using -@@ -1078,7 +1283,7 @@ vc.disconnect() - guest_mount.umount_wait() - - # Set auth caps for the auth ID using the volumeclient -- self._configure_guest_auth(vc_mount, guest_mount, guest_id, mount_path) -+ self._configure_guest_auth(vc_mount, guest_mount, guest_id, mount_path, allow_existing_id=True) - - # Mount the volume in the guest using the auth ID to assert that the - # auth caps are valid --- -2.23.0 - diff --git a/0006-CVE-2021-3524-1.patch b/0006-CVE-2021-3524-1.patch deleted file mode 100644 index f3049837175d180319e52935fe3159b18285f330..0000000000000000000000000000000000000000 --- a/0006-CVE-2021-3524-1.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 763aebb94678018f89427137ffbc0c5205b1edc1 Mon Sep 17 00:00:00 2001 -From: Casey Bodley -Date: Tue, 4 May 2021 08:32:58 -0400 -Subject: [PATCH] rgw: sanitize \r in s3 CORSConfiguration's ExposeHeader - -follows up on 1524d3c0c5cb11775313ea1e2bb36a93257947f2 to escape \r as -well - -Fixes: CVE-2021-3524 - -Reported-by: Sergey Bobrov -Signed-off-by: Casey Bodley -(cherry picked from commit 87806f48e7a1b8891eb90711f1cedd26f1119aac) ---- - src/rgw/rgw_cors.cc | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - -diff --git a/src/rgw/rgw_cors.cc b/src/rgw/rgw_cors.cc -index 0b3e4f39455..bfe83d6420e 100644 ---- a/src/rgw/rgw_cors.cc -+++ b/src/rgw/rgw_cors.cc -@@ -148,8 +148,9 @@ void RGWCORSRule::format_exp_headers(string& s) { - if (s.length() > 0) - s.append(","); - // these values are sent to clients in a 'Access-Control-Expose-Headers' -- // response header, so we escape '\n' to avoid header injection -- boost::replace_all_copy(std::back_inserter(s), header, "\n", "\\n"); -+ // response header, so we escape '\n' and '\r' to avoid header injection -+ std::string tmp = 
boost::replace_all_copy(header, "\n", "\\n"); -+ boost::replace_all_copy(std::back_inserter(s), tmp, "\r", "\\r"); - } - } - --- -2.23.0 - diff --git a/0007-fix-build-error-PTHREAD_STACK_MIN.patch b/0007-fix-build-error-PTHREAD_STACK_MIN.patch deleted file mode 100644 index 2ac16c3c9307cda021db44b44a3b0bd3b206442d..0000000000000000000000000000000000000000 --- a/0007-fix-build-error-PTHREAD_STACK_MIN.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 4655a4fad848d2d844c19e375d34e0bf3a6228dc Mon Sep 17 00:00:00 2001 -From: markeryang -Date: Fri, 13 Aug 2021 18:33:22 +0800 -Subject: [PATCH] fix error PTHREAD_STACK_MIN - -Signed-off-by: markeryang -Signed-off-by: lxk --- - src/boost/boost/thread/pthread/thread_data.hpp | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - -diff --git a/src/boost/boost/thread/pthread/thread_data.hpp b/src/boost/boost/thread/pthread/thread_data.hpp -index aefbeb43..9e459b1f 100644 ---- a/src/boost/boost/thread/pthread/thread_data.hpp -+++ b/src/boost/boost/thread/pthread/thread_data.hpp -@@ -57,8 +57,9 @@ namespace boost - #else - std::size_t page_size = ::sysconf( _SC_PAGESIZE); - #endif --#if PTHREAD_STACK_MIN > 0 -- if (size<PTHREAD_STACK_MIN) size=PTHREAD_STACK_MIN; -+#ifdef PTHREAD_STACK_MIN -+ if (size > 0 && size < (std::size_t)PTHREAD_STACK_MIN) -+ size = (std::size_t)PTHREAD_STACK_MIN; - #endif - size = ((size+page_size-1)/page_size)*page_size; - int res = pthread_attr_setstacksize(&val_, size); --- -2.23.0 diff --git a/ceph-14.2.15.tar.gz b/ceph-16.2.7.tar.gz similarity index 78% rename from ceph-14.2.15.tar.gz rename to ceph-16.2.7.tar.gz index 67b858f07bd7e4ddaa94f95c282f020b67bf256a..6449234f692c3c80efe16d43a14e1f6862cf7cfb 100644 Binary files a/ceph-14.2.15.tar.gz and b/ceph-16.2.7.tar.gz differ diff --git a/ceph.spec b/ceph.spec index 66c7bfeee35c82fcda9bfd12db722ab71a2e797e..63767abdd11d05e9640b752451e4c1d9ed418cee 100644 --- a/ceph.spec +++ b/ceph.spec @@ -109,8 +109,8 @@ # main package definition ################################################################################# Name: ceph -Version: 14.2.15 -Release: 6%{?dist} +Version: 16.2.7 +Release: 1%{?dist} %if 0%{?fedora} || 0%{?rhel} || 0%{?openEuler} Epoch: 2 %endif @@ -125,18 +125,7 @@ License: LGPL-2.1 and CC-BY-SA-3.0 and GPL-2.0 and BSL-1.0 and BSD-3-Clause and Group: System/Filesystems %endif URL: http://ceph.com/ -Source0: %{?_remote_tarball_prefix}ceph-14.2.15.tar.gz - -# backport -Patch0: 0001-cmake-detect-and-use-sigdescr_np-if-available.patch -# backport for cves -Patch1: 0001-CVE-2020-27781-1.patch -Patch2: 0002-CVE-2020-27781-2.patch -Patch3: 0003-CVE-2020-27781-3.patch -Patch4: 0004-CVE-2020-27781-4.patch -Patch5: 0005-CVE-2020-27781-5.patch -Patch6: 0006-CVE-2021-3524-1.patch -Patch7: 0007-fix-build-error-PTHREAD_STACK_MIN.patch +Source0: %{?_remote_tarball_prefix}ceph-%{version}.tar.gz %if 0%{?suse_version} # _insert_obs_source_lines_here @@ -269,7 +258,6 @@ BuildRequires: golang-github-prometheus-prometheus %endif %if 0%{?fedora} || 0%{?rhel} || 0%{?openEuler} Requires: systemd -BuildRequires: boost-random BuildRequires: nss-devel BuildRequires: keyutils-libs-devel BuildRequires: libibverbs-devel @@ -360,6 +348,7 @@ BuildRequires: libnuma-devel %if 0%{?rhel} >= 8 || 0%{?openEuler} BuildRequires: /usr/bin/pathfix.py %endif +BuildRequires: lua-devel sqlite-devel cryptsetup-devel %description Ceph is a massively scalable, open-source, distributed storage system that runs @@ -482,7 +471,6 @@ Requires: python%{_python_buildid}-Werkzeug %if 0%{?weak_deps} Recommends: ceph-mgr-dashboard = %{_epoch_prefix}%{version}-%{release} Recommends:
ceph-mgr-diskprediction-local = %{_epoch_prefix}%{version}-%{release} -Recommends: ceph-mgr-diskprediction-cloud = %{_epoch_prefix}%{version}-%{release} Recommends: ceph-mgr-rook = %{_epoch_prefix}%{version}-%{release} Recommends: ceph-mgr-k8sevents = %{_epoch_prefix}%{version}-%{release} Recommends: ceph-mgr-ssh = %{_epoch_prefix}%{version}-%{release} @@ -518,17 +506,6 @@ Requires: scipy ceph-mgr-diskprediction-local is a ceph-mgr plugin that tries to predict disk failures using local algorithms and machine-learning databases. -%package mgr-diskprediction-cloud -Summary: ceph-mgr diskprediction_cloud plugin -BuildArch: noarch -%if 0%{?suse_version} -Group: System/Filesystems -%endif -Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release} -%description mgr-diskprediction-cloud -ceph-mgr-diskprediction-cloud is a ceph-mgr plugin that tries to predict -disk failures using services in the Google cloud. - %package fuse Summary: Ceph fuse-based client %if 0%{?suse_version} @@ -956,7 +933,7 @@ This package provides Ceph’s default alerts for Prometheus. # common ################################################################################# %prep -%autosetup -p1 -n ceph-14.2.15 +%autosetup -p1 -n ceph-%{version} %build # LTO can be enabled as soon as the following GCC bug is fixed: @@ -1075,8 +1052,7 @@ ${CMAKE} .. \ -DBOOST_J=$CEPH_SMP_NCPUS \ -DWITH_GRAFANA=ON -taskset -c 0-31 make "$CEPH_MFLAGS_JOBS" - +make %{?_smp_mflags} %if 0%{with make_check} %check @@ -1117,7 +1093,7 @@ ln -sf %{_sbindir}/mount.ceph %{buildroot}/sbin/mount.ceph install -m 0644 -D udev/50-rbd.rules %{buildroot}%{_udevrulesdir}/50-rbd.rules # sudoers.d -install -m 0600 -D sudoers.d/ceph-osd-smartctl %{buildroot}%{_sysconfdir}/sudoers.d/ceph-osd-smartctl +install -m 0600 -D sudoers.d/ceph-smartctl %{buildroot}%{_sysconfdir}/sudoers.d/ceph-smartctl %if 0%{?rhel} >= 8 || 0%{?openEuler} pathfix.py -pni "%{__python3} %{py3_shbang_opts}" %{buildroot}%{_bindir}/* @@ -1407,40 +1383,11 @@ fi %files mgr %{_bindir}/ceph-mgr %dir %{_datadir}/ceph/mgr -%{_datadir}/ceph/mgr/alerts -%{_datadir}/ceph/mgr/ansible -%{_datadir}/ceph/mgr/balancer -%{_datadir}/ceph/mgr/crash -%{_datadir}/ceph/mgr/deepsea -%{_datadir}/ceph/mgr/devicehealth -%{_datadir}/ceph/mgr/influx -%{_datadir}/ceph/mgr/insights -%{_datadir}/ceph/mgr/iostat -%{_datadir}/ceph/mgr/localpool %{_datadir}/ceph/mgr/mgr_module.* %{_datadir}/ceph/mgr/mgr_util.* -%{_datadir}/ceph/mgr/orchestrator_cli -%{_datadir}/ceph/mgr/orchestrator.* -%{_datadir}/ceph/mgr/osd_perf_query -%{_datadir}/ceph/mgr/pg_autoscaler -%{_datadir}/ceph/mgr/progress -%{_datadir}/ceph/mgr/prometheus -%{_datadir}/ceph/mgr/rbd_support -%{_datadir}/ceph/mgr/restful -%{_datadir}/ceph/mgr/selftest -%{_datadir}/ceph/mgr/status -%{_datadir}/ceph/mgr/telegraf -%{_datadir}/ceph/mgr/telemetry -%{_datadir}/ceph/mgr/test_orchestrator -%{_datadir}/ceph/mgr/volumes -%{_datadir}/ceph/mgr/zabbix %{_unitdir}/ceph-mgr@.service %{_unitdir}/ceph-mgr.target %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/mgr -%exclude %{_datadir}/ceph/mgr/rook -%exclude %{_datadir}/ceph/mgr/k8sevents -%exclude %{_datadir}/ceph/mgr/dashboard -%exclude %{_datadir}/ceph/mgr/ssh %post mgr %if 0%{?suse_version} @@ -1496,19 +1443,6 @@ if [ $1 -eq 1 ] ; then /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || : fi -%files mgr-diskprediction-cloud -%{_datadir}/ceph/mgr/diskprediction_cloud - -%post mgr-diskprediction-cloud -if [ $1 -eq 1 ] ; then - /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || : -fi - -%postun 
mgr-diskprediction-cloud -if [ $1 -eq 1 ] ; then - /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || : -fi - %files mon %{_bindir}/ceph-mon %{_bindir}/ceph-monstore-tool @@ -2044,6 +1978,9 @@ exit 0 %changelog +* Sun Dec 26 2021 sdlzx - 1:16.2.7-1 +- Update to 16.2.7 + * Fri Aug 13 2021 yanglongkang - 1:14.2.15-6 - fix build error PTHREAD_STACK_MIN