diff --git a/0001-CVE-2020-27781-1.patch b/0001-CVE-2020-27781-1.patch deleted file mode 100644 index 12f4db54803459a273799c47885df741fd48dff1..0000000000000000000000000000000000000000 --- a/0001-CVE-2020-27781-1.patch +++ /dev/null @@ -1,48 +0,0 @@ -From 5dbc6bf0a67183bff7d7ca48ccd90ebbce492408 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?=C4=90=E1=BA=B7ng=20Minh=20D=C5=A9ng?= -Date: Sun, 10 May 2020 11:37:23 +0700 -Subject: [PATCH 1/5] pybind/ceph_volume_client: Fix PEP-8 SyntaxWarning -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Signed-off-by: Đặng Minh Dũng -(cherry picked from commit 3ce9a89a5a1a2d7fa3d57c597b781a6aece7cbb5) ---- - src/pybind/ceph_volume_client.py | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/src/pybind/ceph_volume_client.py b/src/pybind/ceph_volume_client.py -index 7d7e5b49e40..25cd6b91ae2 100644 ---- a/src/pybind/ceph_volume_client.py -+++ b/src/pybind/ceph_volume_client.py -@@ -355,7 +355,7 @@ class CephFSVolumeClient(object): - continue - - (group_id, volume_id) = volume.split('/') -- group_id = group_id if group_id is not 'None' else None -+ group_id = group_id if group_id != 'None' else None - volume_path = VolumePath(group_id, volume_id) - access_level = volume_data['access_level'] - -@@ -378,7 +378,7 @@ class CephFSVolumeClient(object): - if vol_meta['auths'][auth_id] == want_auth: - continue - -- readonly = True if access_level is 'r' else False -+ readonly = access_level == 'r' - self._authorize_volume(volume_path, auth_id, readonly) - - # Recovered from partial auth updates for the auth ID's access -@@ -1120,7 +1120,7 @@ class CephFSVolumeClient(object): - - # Construct auth caps that if present might conflict with the desired - # auth caps. -- unwanted_access_level = 'r' if want_access_level is 'rw' else 'rw' -+ unwanted_access_level = 'r' if want_access_level == 'rw' else 'rw' - unwanted_mds_cap = 'allow {0} path={1}'.format(unwanted_access_level, path) - if namespace: - unwanted_osd_cap = 'allow {0} pool={1} namespace={2}'.format( --- -2.23.0 - diff --git a/0001-cmake-detect-and-use-sigdescr_np-if-available.patch b/0001-cmake-detect-and-use-sigdescr_np-if-available.patch deleted file mode 100644 index dee16759d50e0fbf772afb25fa4445252f01f716..0000000000000000000000000000000000000000 --- a/0001-cmake-detect-and-use-sigdescr_np-if-available.patch +++ /dev/null @@ -1,73 +0,0 @@ -From 9b34ba1777972808ba2af0073c967dece6c70626 Mon Sep 17 00:00:00 2001 -From: David Disseldorp -Date: Tue, 1 Sep 2020 13:49:21 +0200 -Subject: [PATCH] cmake: detect and use sigdescr_np() if available - -sys_siglist is deprecated with glibc 2.32. A new thread-safe and -async-signal safe sigdescr_np() function is provided, so use it if -available. 
- -Fixes: https://tracker.ceph.com/issues/47187 -Signed-off-by: David Disseldorp -(cherry picked from commit b9b6faf66ae67648626470cb4fc3f0850ac4d842) - -Conflicts: - CMakeLists.txt - cmake/modules/CephChecks.cmake -- CephChecks.cmake file does not exist in nautilus; manually cherry-picked the - change in that file to top-level CMakeLists.txt ---- - CMakeLists.txt | 1 + - src/global/signal_handler.h | 8 +++++--- - src/include/config-h.in.cmake | 3 +++ - 3 files changed, 9 insertions(+), 3 deletions(-) - -diff --git a/CMakeLists.txt b/CMakeLists.txt -index 5b7a67bec60..bdeea6f9c7d 100644 ---- a/CMakeLists.txt -+++ b/CMakeLists.txt -@@ -105,6 +105,7 @@ CHECK_FUNCTION_EXISTS(strerror_r HAVE_Strerror_R) - CHECK_FUNCTION_EXISTS(name_to_handle_at HAVE_NAME_TO_HANDLE_AT) - CHECK_FUNCTION_EXISTS(pipe2 HAVE_PIPE2) - CHECK_FUNCTION_EXISTS(accept4 HAVE_ACCEPT4) -+CHECK_FUNCTION_EXISTS(sigdescr_np HAVE_SIGDESCR_NP) - - include(CMakePushCheckState) - cmake_push_check_state(RESET) -diff --git a/src/global/signal_handler.h b/src/global/signal_handler.h -index 476724201aa..c101b2e2873 100644 ---- a/src/global/signal_handler.h -+++ b/src/global/signal_handler.h -@@ -20,10 +20,12 @@ - - typedef void (*signal_handler_t)(int); - --#ifndef HAVE_REENTRANT_STRSIGNAL --# define sig_str(signum) sys_siglist[signum] --#else -+#ifdef HAVE_SIGDESCR_NP -+# define sig_str(signum) sigdescr_np(signum) -+#elif HAVE_REENTRANT_STRSIGNAL - # define sig_str(signum) strsignal(signum) -+#else -+# define sig_str(signum) sys_siglist[signum] - #endif - - void install_sighandler(int signum, signal_handler_t handler, int flags); -diff --git a/src/include/config-h.in.cmake b/src/include/config-h.in.cmake -index ccce8fe0017..acced696e36 100644 ---- a/src/include/config-h.in.cmake -+++ b/src/include/config-h.in.cmake -@@ -235,6 +235,9 @@ - /* Define to 1 if you have sched.h. */ - #cmakedefine HAVE_SCHED 1 - -+/* Define to 1 if you have sigdescr_np. 
*/ -+#cmakedefine HAVE_SIGDESCR_NP 1 -+ - /* Support SSE (Streaming SIMD Extensions) instructions */ - #cmakedefine HAVE_SSE - --- -2.23.0 - diff --git a/0001-fix-error-transform-is-not-a-member-of-std.patch b/0001-fix-error-transform-is-not-a-member-of-std.patch new file mode 100644 index 0000000000000000000000000000000000000000..78ddfd9c2fdd5412910547c751d6765bc2a2a258 --- /dev/null +++ b/0001-fix-error-transform-is-not-a-member-of-std.patch @@ -0,0 +1,24 @@ +From f22ee023ad92b34697c743936451731c0ad4dbb6 Mon Sep 17 00:00:00 2001 +From: liuqinfei <18138800392@163.com> +Date: Thu, 30 Dec 2021 09:46:33 +0800 +Subject: [PATCH] fix error: 'transform' is not a member of 'std' + +--- + src/common/Formatter.cc | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/src/common/Formatter.cc b/src/common/Formatter.cc +index 362deffb5ab..4e9838e3a36 100644 +--- a/src/common/Formatter.cc ++++ b/src/common/Formatter.cc +@@ -18,6 +18,7 @@ + #include "common/escape.h" + #include "include/buffer.h" + ++#include <algorithm> + #include + #include + #include +-- +2.30.0 + diff --git a/0002-CVE-2020-27781-2.patch b/0002-CVE-2020-27781-2.patch deleted file mode 100644 index 8f7cf63d26bc42a2df92ce392650f646ee6aee0a..0000000000000000000000000000000000000000 --- a/0002-CVE-2020-27781-2.patch +++ /dev/null @@ -1,172 +0,0 @@ -From ab18393db0b34506c3fd11346b6d0f1b781b9d99 Mon Sep 17 00:00:00 2001 -From: Ramana Raja -Date: Wed, 25 Nov 2020 16:44:35 +0530 -Subject: [PATCH 2/5] pybind/ceph_volume_client: Disallow authorize auth_id - -This patch disallow the ceph_volume_client to authorize the auth_id -which is not created by ceph_volume_client. Those auth_ids could be -created by other means for other use cases which should not be modified -by ceph_volume_client. - -Fixes: https://tracker.ceph.com/issues/48555 -Signed-off-by: Ramana Raja -Signed-off-by: Kotresh HR -(cherry picked from commit 3a85d2d04028a323952a31d18cdbefb710be2e2b) ---- - src/pybind/ceph_volume_client.py | 63 ++++++++++++++++++++------------ - 1 file changed, 39 insertions(+), 24 deletions(-) - -diff --git a/src/pybind/ceph_volume_client.py b/src/pybind/ceph_volume_client.py -index 25cd6b91ae2..e2ab64ee226 100644 ---- a/src/pybind/ceph_volume_client.py -+++ b/src/pybind/ceph_volume_client.py -@@ -215,6 +215,7 @@ CEPHFSVOLUMECLIENT_VERSION_HISTORY = """ - * 2 - Added get_object, put_object, delete_object methods to CephFSVolumeClient - * 3 - Allow volumes to be created without RADOS namespace isolation - * 4 - Added get_object_and_version, put_object_versioned method to CephFSVolumeClient -+ * 5 - Disallow authorize API for users not created by CephFSVolumeClient - """ - - -@@ -238,7 +239,7 @@ class CephFSVolumeClient(object): - """ - - # Current version -- version = 4 -+ version = 5 - - # Where shall we create our volumes? - POOL_PREFIX = "fsvolume_" -@@ -379,7 +380,18 @@ class CephFSVolumeClient(object): - continue - - readonly = access_level == 'r' -+ client_entity = "client.{0}".format(auth_id) -+ try: -+ existing_caps = self._rados_command( -+ 'auth get', -+ { -+ 'entity': client_entity -+ } -+ ) -+ # FIXME: rados raising Error instead of ObjectNotFound in auth get failure -+ except rados.Error: -+ existing_caps = None -+ self._authorize_volume(volume_path, auth_id, readonly, existing_caps) - - # Recovered from partial auth updates for the auth ID's access - # to a volume. 
-@@ -975,6 +987,18 @@ class CephFSVolumeClient(object): - """ - - with self._auth_lock(auth_id): -+ client_entity = "client.{0}".format(auth_id) -+ try: -+ existing_caps = self._rados_command( -+ 'auth get', -+ { -+ 'entity': client_entity -+ } -+ ) -+ # FIXME: rados raising Error instead of ObjectNotFound in auth get failure -+ except rados.Error: -+ existing_caps = None -+ - # Existing meta, or None, to be updated - auth_meta = self._auth_metadata_get(auth_id) - -@@ -988,7 +1012,14 @@ class CephFSVolumeClient(object): - 'dirty': True, - } - } -+ - if auth_meta is None: -+ if existing_caps is not None: -+ msg = "auth ID: {0} exists and not created by ceph_volume_client. Not allowed to modify".format(auth_id) -+ log.error(msg) -+ raise CephFSVolumeClientError(msg) -+ -+ # non-existent auth IDs - sys.stderr.write("Creating meta for ID {0} with tenant {1}\n".format( - auth_id, tenant_id - )) -@@ -998,14 +1029,6 @@ class CephFSVolumeClient(object): - 'tenant_id': tenant_id.__str__() if tenant_id else None, - 'volumes': volume - } -- -- # Note: this is *not* guaranteeing that the key doesn't already -- # exist in Ceph: we are allowing VolumeClient tenants to -- # 'claim' existing Ceph keys. In order to prevent VolumeClient -- # tenants from reading e.g. client.admin keys, you need to -- # have configured your VolumeClient user (e.g. Manila) to -- # have mon auth caps that prevent it from accessing those keys -- # (e.g. limit it to only access keys with a manila.* prefix) - else: - # Disallow tenants to share auth IDs - if auth_meta['tenant_id'].__str__() != tenant_id.__str__(): -@@ -1025,7 +1048,7 @@ class CephFSVolumeClient(object): - self._auth_metadata_set(auth_id, auth_meta) - - with self._volume_lock(volume_path): -- key = self._authorize_volume(volume_path, auth_id, readonly) -+ key = self._authorize_volume(volume_path, auth_id, readonly, existing_caps) - - auth_meta['dirty'] = False - auth_meta['volumes'][volume_path_str]['dirty'] = False -@@ -1042,7 +1065,7 @@ class CephFSVolumeClient(object): - 'auth_key': None - } - -- def _authorize_volume(self, volume_path, auth_id, readonly): -+ def _authorize_volume(self, volume_path, auth_id, readonly, existing_caps): - vol_meta = self._volume_metadata_get(volume_path) - - access_level = 'r' if readonly else 'rw' -@@ -1061,14 +1084,14 @@ class CephFSVolumeClient(object): - vol_meta['auths'].update(auth) - self._volume_metadata_set(volume_path, vol_meta) - -- key = self._authorize_ceph(volume_path, auth_id, readonly) -+ key = self._authorize_ceph(volume_path, auth_id, readonly, existing_caps) - - vol_meta['auths'][auth_id]['dirty'] = False - self._volume_metadata_set(volume_path, vol_meta) - - return key - -- def _authorize_ceph(self, volume_path, auth_id, readonly): -+ def _authorize_ceph(self, volume_path, auth_id, readonly, existing_caps): - path = self._get_path(volume_path) - log.debug("Authorizing Ceph id '{0}' for path '{1}'".format( - auth_id, path -@@ -1096,15 +1119,7 @@ class CephFSVolumeClient(object): - want_osd_cap = 'allow {0} pool={1}'.format(want_access_level, - pool_name) - -- try: -- existing = self._rados_command( -- 'auth get', -- { -- 'entity': client_entity -- } -- ) -- # FIXME: rados raising Error instead of ObjectNotFound in auth get failure -- except rados.Error: -+ if existing_caps is None: - caps = self._rados_command( - 'auth get-or-create', - { -@@ -1116,7 +1131,7 @@ class CephFSVolumeClient(object): - }) - else: - # entity exists, update it -- cap = existing[0] -+ cap = existing_caps[0] - - # Construct auth caps that 
if present might conflict with the desired - # auth caps. --- -2.23.0 - diff --git a/0002-enable-install-deps-in-openEuler.patch b/0002-enable-install-deps-in-openEuler.patch new file mode 100644 index 0000000000000000000000000000000000000000..b8dc72dc9f614274bc533cc186580815f066b725 --- /dev/null +++ b/0002-enable-install-deps-in-openEuler.patch @@ -0,0 +1,25 @@ +From 73d7c0e9c106d6a6aeda0491a426e8f458e0cb13 Mon Sep 17 00:00:00 2001 +From: liuqinfei <18138800392@163.com> +Date: Thu, 30 Dec 2021 11:48:33 +0800 +Subject: [PATCH 2/2] enable install deps in openEuler + +--- + install-deps.sh | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/install-deps.sh b/install-deps.sh +index 3ba5e47ff58..b35013607e4 100755 +--- a/install-deps.sh ++++ b/install-deps.sh +@@ -337,7 +337,7 @@ else + $SUDO env DEBIAN_FRONTEND=noninteractive apt-get -y remove ceph-build-deps + if [ "$control" != "debian/control" ] ; then rm $control; fi + ;; +- centos|fedora|rhel|ol|virtuozzo) ++ centos|fedora|rhel|ol|virtuozzo|openEuler) + builddepcmd="dnf -y builddep --allowerasing" + echo "Using dnf to install dependencies" + case "$ID" in +-- +2.30.0 + diff --git a/0003-CVE-2020-27781-3.patch b/0003-CVE-2020-27781-3.patch deleted file mode 100644 index 5963c3447c1965978db67a76340b81377c09c8cb..0000000000000000000000000000000000000000 --- a/0003-CVE-2020-27781-3.patch +++ /dev/null @@ -1,113 +0,0 @@ -From 621fea6fda4f06876295f67d4767914332ff82d3 Mon Sep 17 00:00:00 2001 -From: Kotresh HR -Date: Thu, 26 Nov 2020 14:48:16 +0530 -Subject: [PATCH 3/5] pybind/ceph_volume_client: Preserve existing caps while - authorize/deauthorize auth-id - -Authorize/Deauthorize used to overwrite the caps of auth-id which would -end up deleting existing caps. This patch fixes the same by retaining -the existing caps by appending or deleting the new caps as needed. 
- -Fixes: https://tracker.ceph.com/issues/48555 -Signed-off-by: Kotresh HR -(cherry picked from commit 47100e528ef77e7e82dc9877424243dc6a7e7533) ---- - src/pybind/ceph_volume_client.py | 43 ++++++++++++++++++++++---------- - 1 file changed, 30 insertions(+), 13 deletions(-) - -diff --git a/src/pybind/ceph_volume_client.py b/src/pybind/ceph_volume_client.py -index e2ab64ee226..ca1f361d03c 100644 ---- a/src/pybind/ceph_volume_client.py -+++ b/src/pybind/ceph_volume_client.py -@@ -973,6 +973,26 @@ class CephFSVolumeClient(object): - data['version'] = self.version - return self._metadata_set(self._volume_metadata_path(volume_path), data) - -+ def _prepare_updated_caps_list(self, existing_caps, mds_cap_str, osd_cap_str, authorize=True): -+ caps_list = [] -+ for k, v in existing_caps['caps'].items(): -+ if k == 'mds' or k == 'osd': -+ continue -+ elif k == 'mon': -+ if not authorize and v == 'allow r': -+ continue -+ caps_list.extend((k,v)) -+ -+ if mds_cap_str: -+ caps_list.extend(('mds', mds_cap_str)) -+ if osd_cap_str: -+ caps_list.extend(('osd', osd_cap_str)) -+ -+ if authorize and 'mon' not in caps_list: -+ caps_list.extend(('mon', 'allow r')) -+ -+ return caps_list -+ - def authorize(self, volume_path, auth_id, readonly=False, tenant_id=None): - """ - Get-or-create a Ceph auth identity for `auth_id` and grant them access -@@ -1151,8 +1171,8 @@ class CephFSVolumeClient(object): - if not orig_mds_caps: - return want_mds_cap, want_osd_cap - -- mds_cap_tokens = orig_mds_caps.split(",") -- osd_cap_tokens = orig_osd_caps.split(",") -+ mds_cap_tokens = [x.strip() for x in orig_mds_caps.split(",")] -+ osd_cap_tokens = [x.strip() for x in orig_osd_caps.split(",")] - - if want_mds_cap in mds_cap_tokens: - return orig_mds_caps, orig_osd_caps -@@ -1173,15 +1193,14 @@ class CephFSVolumeClient(object): - orig_mds_caps, orig_osd_caps, want_mds_cap, want_osd_cap, - unwanted_mds_cap, unwanted_osd_cap) - -+ caps_list = self._prepare_updated_caps_list(cap, mds_cap_str, osd_cap_str) - caps = self._rados_command( - 'auth caps', - { - 'entity': client_entity, -- 'caps': [ -- 'mds', mds_cap_str, -- 'osd', osd_cap_str, -- 'mon', cap['caps'].get('mon', 'allow r')] -+ 'caps': caps_list - }) -+ - caps = self._rados_command( - 'auth get', - { -@@ -1306,8 +1325,8 @@ class CephFSVolumeClient(object): - ) - - def cap_remove(orig_mds_caps, orig_osd_caps, want_mds_caps, want_osd_caps): -- mds_cap_tokens = orig_mds_caps.split(",") -- osd_cap_tokens = orig_osd_caps.split(",") -+ mds_cap_tokens = [x.strip() for x in orig_mds_caps.split(",")] -+ osd_cap_tokens = [x.strip() for x in orig_osd_caps.split(",")] - - for want_mds_cap, want_osd_cap in zip(want_mds_caps, want_osd_caps): - if want_mds_cap in mds_cap_tokens: -@@ -1323,17 +1342,15 @@ class CephFSVolumeClient(object): - mds_cap_str, osd_cap_str = cap_remove(orig_mds_caps, orig_osd_caps, - want_mds_caps, want_osd_caps) - -- if not mds_cap_str: -+ caps_list = self._prepare_updated_caps_list(cap, mds_cap_str, osd_cap_str, authorize=False) -+ if not caps_list: - self._rados_command('auth del', {'entity': client_entity}, decode=False) - else: - self._rados_command( - 'auth caps', - { - 'entity': client_entity, -- 'caps': [ -- 'mds', mds_cap_str, -- 'osd', osd_cap_str, -- 'mon', cap['caps'].get('mon', 'allow r')] -+ 'caps': caps_list - }) - - # FIXME: rados raising Error instead of ObjectNotFound in auth get failure --- -2.23.0 - diff --git a/0004-CVE-2020-27781-4.patch b/0004-CVE-2020-27781-4.patch deleted file mode 100644 index 
069efe6947d4609f553198a9e2ae0cee8f9c0e1e..0000000000000000000000000000000000000000 --- a/0004-CVE-2020-27781-4.patch +++ /dev/null @@ -1,52 +0,0 @@ -From 6410f3dd63890f251414377de93cd51bfc372230 Mon Sep 17 00:00:00 2001 -From: Kotresh HR -Date: Sun, 6 Dec 2020 12:40:20 +0530 -Subject: [PATCH 4/5] pybind/ceph_volume_client: Optionally authorize existing - auth-ids - -Optionally allow authorizing auth-ids not created by ceph_volume_client -via the option 'allow_existing_id'. This can help existing deployers -of manila to disallow/allow authorization of pre-created auth IDs -via a manila driver config that sets 'allow_existing_id' to False/True. - -Fixes: https://tracker.ceph.com/issues/48555 -Signed-off-by: Kotresh HR -(cherry picked from commit 77b42496e25cbd4af2e80a064ddf26221b53733f) ---- - src/pybind/ceph_volume_client.py | 6 ++++-- - 1 file changed, 4 insertions(+), 2 deletions(-) - -diff --git a/src/pybind/ceph_volume_client.py b/src/pybind/ceph_volume_client.py -index ca1f361d03c..feeb495de00 100644 ---- a/src/pybind/ceph_volume_client.py -+++ b/src/pybind/ceph_volume_client.py -@@ -993,7 +993,7 @@ class CephFSVolumeClient(object): - - return caps_list - -- def authorize(self, volume_path, auth_id, readonly=False, tenant_id=None): -+ def authorize(self, volume_path, auth_id, readonly=False, tenant_id=None, allow_existing_id=False): - """ - Get-or-create a Ceph auth identity for `auth_id` and grant them access - to -@@ -1003,6 +1003,8 @@ class CephFSVolumeClient(object): - :param tenant_id: Optionally provide a stringizable object to - restrict any created cephx IDs to other callers - passing the same tenant ID. -+ :allow_existing_id: Optionally authorize existing auth-ids not -+ created by ceph_volume_client - :return: - """ - -@@ -1034,7 +1036,7 @@ class CephFSVolumeClient(object): - } - - if auth_meta is None: -- if existing_caps is not None: -+ if not allow_existing_id and existing_caps is not None: - msg = "auth ID: {0} exists and not created by ceph_volume_client. Not allowed to modify".format(auth_id) - log.error(msg) - raise CephFSVolumeClientError(msg) --- -2.23.0 - diff --git a/0005-CVE-2020-27781-5.patch b/0005-CVE-2020-27781-5.patch deleted file mode 100644 index f4f4cf0642c1a6eb2097e630a12d71bd403314c4..0000000000000000000000000000000000000000 --- a/0005-CVE-2020-27781-5.patch +++ /dev/null @@ -1,275 +0,0 @@ -From a18b92d39f5d4714e9a79c3c4a55049daec65290 Mon Sep 17 00:00:00 2001 -From: Kotresh HR -Date: Tue, 1 Dec 2020 16:14:17 +0530 -Subject: [PATCH 5/5] tasks/cephfs/test_volume_client: Add tests for - authorize/deauthorize - -1. Add testcase for authorizing auth_id which is not added by - ceph_volume_client -2. Add testcase to test 'allow_existing_id' option -3. 
Add testcase for deauthorizing auth_id which has got it's caps - updated out of band - -Signed-off-by: Kotresh HR -(cherry picked from commit aa4beb3d993649a696af95cf27150cc460baaf70) - -Conflicts: - qa/tasks/cephfs/test_volume_client.py ---- - qa/tasks/cephfs/test_volume_client.py | 213 +++++++++++++++++++++++++- - 1 file changed, 209 insertions(+), 4 deletions(-) - -diff --git a/qa/tasks/cephfs/test_volume_client.py b/qa/tasks/cephfs/test_volume_client.py -index 0f205ecec6e..1c37b37a0b0 100644 ---- a/qa/tasks/cephfs/test_volume_client.py -+++ b/qa/tasks/cephfs/test_volume_client.py -@@ -58,7 +58,7 @@ vc.disconnect() - def _configure_guest_auth(self, volumeclient_mount, guest_mount, - guest_entity, mount_path, - namespace_prefix=None, readonly=False, -- tenant_id=None): -+ tenant_id=None, allow_existing_id=False): - """ - Set up auth credentials for the guest client to mount a volume. - -@@ -83,14 +83,16 @@ vc.disconnect() - key = self._volume_client_python(volumeclient_mount, dedent(""" - vp = VolumePath("{group_id}", "{volume_id}") - auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly}, -- tenant_id="{tenant_id}") -+ tenant_id="{tenant_id}", -+ allow_existing_id="{allow_existing_id}") - print(auth_result['auth_key']) - """.format( - group_id=group_id, - volume_id=volume_id, - guest_entity=guest_entity, - readonly=readonly, -- tenant_id=tenant_id)), volume_prefix, namespace_prefix -+ tenant_id=tenant_id, -+ allow_existing_id=allow_existing_id)), volume_prefix, namespace_prefix - ) - - # CephFSVolumeClient's authorize() does not return the secret -@@ -858,6 +860,209 @@ vc.disconnect() - ))) - self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes")) - -+ def test_authorize_auth_id_not_created_by_ceph_volume_client(self): -+ """ -+ If the auth_id already exists and is not created by -+ ceph_volume_client, it's not allowed to authorize -+ the auth-id by default. -+ """ -+ volumeclient_mount = self.mounts[1] -+ volumeclient_mount.umount_wait() -+ -+ # Configure volumeclient_mount as the handle for driving volumeclient. -+ self._configure_vc_auth(volumeclient_mount, "manila") -+ -+ group_id = "groupid" -+ volume_id = "volumeid" -+ -+ # Create auth_id -+ out = self.fs.mon_manager.raw_cluster_cmd( -+ "auth", "get-or-create", "client.guest1", -+ "mds", "allow *", -+ "osd", "allow rw", -+ "mon", "allow *" -+ ) -+ -+ auth_id = "guest1" -+ guestclient_1 = { -+ "auth_id": auth_id, -+ "tenant_id": "tenant1", -+ } -+ -+ # Create a volume. -+ self._volume_client_python(volumeclient_mount, dedent(""" -+ vp = VolumePath("{group_id}", "{volume_id}") -+ vc.create_volume(vp, 1024*1024*10) -+ """.format( -+ group_id=group_id, -+ volume_id=volume_id, -+ ))) -+ -+ # Cannot authorize 'guestclient_1' to access the volume. 
-+ # It uses auth ID 'guest1', which already exists and not -+ # created by ceph_volume_client -+ with self.assertRaises(CommandFailedError): -+ self._volume_client_python(volumeclient_mount, dedent(""" -+ vp = VolumePath("{group_id}", "{volume_id}") -+ vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}") -+ """.format( -+ group_id=group_id, -+ volume_id=volume_id, -+ auth_id=guestclient_1["auth_id"], -+ tenant_id=guestclient_1["tenant_id"] -+ ))) -+ -+ # Delete volume -+ self._volume_client_python(volumeclient_mount, dedent(""" -+ vp = VolumePath("{group_id}", "{volume_id}") -+ vc.delete_volume(vp) -+ """.format( -+ group_id=group_id, -+ volume_id=volume_id, -+ ))) -+ -+ def test_authorize_allow_existing_id_option(self): -+ """ -+ If the auth_id already exists and is not created by -+ ceph_volume_client, it's not allowed to authorize -+ the auth-id by default but is allowed with option -+ allow_existing_id. -+ """ -+ volumeclient_mount = self.mounts[1] -+ volumeclient_mount.umount_wait() -+ -+ # Configure volumeclient_mount as the handle for driving volumeclient. -+ self._configure_vc_auth(volumeclient_mount, "manila") -+ -+ group_id = "groupid" -+ volume_id = "volumeid" -+ -+ # Create auth_id -+ out = self.fs.mon_manager.raw_cluster_cmd( -+ "auth", "get-or-create", "client.guest1", -+ "mds", "allow *", -+ "osd", "allow rw", -+ "mon", "allow *" -+ ) -+ -+ auth_id = "guest1" -+ guestclient_1 = { -+ "auth_id": auth_id, -+ "tenant_id": "tenant1", -+ } -+ -+ # Create a volume. -+ self._volume_client_python(volumeclient_mount, dedent(""" -+ vp = VolumePath("{group_id}", "{volume_id}") -+ vc.create_volume(vp, 1024*1024*10) -+ """.format( -+ group_id=group_id, -+ volume_id=volume_id, -+ ))) -+ -+ # Cannot authorize 'guestclient_1' to access the volume -+ # by default, which already exists and not created by -+ # ceph_volume_client but is allowed with option 'allow_existing_id'. -+ self._volume_client_python(volumeclient_mount, dedent(""" -+ vp = VolumePath("{group_id}", "{volume_id}") -+ vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}", -+ allow_existing_id="{allow_existing_id}") -+ """.format( -+ group_id=group_id, -+ volume_id=volume_id, -+ auth_id=guestclient_1["auth_id"], -+ tenant_id=guestclient_1["tenant_id"], -+ allow_existing_id=True -+ ))) -+ -+ # Delete volume -+ self._volume_client_python(volumeclient_mount, dedent(""" -+ vp = VolumePath("{group_id}", "{volume_id}") -+ vc.delete_volume(vp) -+ """.format( -+ group_id=group_id, -+ volume_id=volume_id, -+ ))) -+ -+ def test_deauthorize_auth_id_after_out_of_band_update(self): -+ """ -+ If the auth_id authorized by ceph_volume_client is updated -+ out of band, the auth_id should not be deleted after a -+ deauthorize. It should only remove caps associated it. -+ """ -+ volumeclient_mount = self.mounts[1] -+ volumeclient_mount.umount_wait() -+ -+ # Configure volumeclient_mount as the handle for driving volumeclient. -+ self._configure_vc_auth(volumeclient_mount, "manila") -+ -+ group_id = "groupid" -+ volume_id = "volumeid" -+ -+ -+ auth_id = "guest1" -+ guestclient_1 = { -+ "auth_id": auth_id, -+ "tenant_id": "tenant1", -+ } -+ -+ # Create a volume. -+ self._volume_client_python(volumeclient_mount, dedent(""" -+ vp = VolumePath("{group_id}", "{volume_id}") -+ vc.create_volume(vp, 1024*1024*10) -+ """.format( -+ group_id=group_id, -+ volume_id=volume_id, -+ ))) -+ -+ # Authorize 'guestclient_1' to access the volume. 
-+ self._volume_client_python(volumeclient_mount, dedent(""" -+ vp = VolumePath("{group_id}", "{volume_id}") -+ vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}") -+ """.format( -+ group_id=group_id, -+ volume_id=volume_id, -+ auth_id=guestclient_1["auth_id"], -+ tenant_id=guestclient_1["tenant_id"] -+ ))) -+ -+ # Update caps for guestclient_1 out of band -+ out = self.fs.mon_manager.raw_cluster_cmd( -+ "auth", "caps", "client.guest1", -+ "mds", "allow rw path=/volumes/groupid, allow rw path=/volumes/groupid/volumeid", -+ "osd", "allow rw pool=cephfs_data namespace=fsvolumens_volumeid", -+ "mon", "allow r", -+ "mgr", "allow *" -+ ) -+ -+ # Deauthorize guestclient_1 -+ self._volume_client_python(volumeclient_mount, dedent(""" -+ vp = VolumePath("{group_id}", "{volume_id}") -+ vc.deauthorize(vp, "{guest_entity}") -+ """.format( -+ group_id=group_id, -+ volume_id=volume_id, -+ guest_entity=guestclient_1["auth_id"] -+ ))) -+ -+ # Validate the caps of guestclient_1 after deauthorize. It should not have deleted -+ # guestclient_1. The mgr and mds caps should be present which was updated out of band. -+ out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty")) -+ -+ self.assertEqual("client.guest1", out[0]["entity"]) -+ self.assertEqual("allow rw path=/volumes/groupid", out[0]["caps"]["mds"]) -+ self.assertEqual("allow *", out[0]["caps"]["mgr"]) -+ self.assertNotIn("osd", out[0]["caps"]) -+ -+ # Delete volume -+ self._volume_client_python(volumeclient_mount, dedent(""" -+ vp = VolumePath("{group_id}", "{volume_id}") -+ vc.delete_volume(vp) -+ """.format( -+ group_id=group_id, -+ volume_id=volume_id, -+ ))) -+ - def test_recover_metadata(self): - """ - That volume client can recover from partial auth updates using -@@ -1078,7 +1283,7 @@ vc.disconnect() - guest_mount.umount_wait() - - # Set auth caps for the auth ID using the volumeclient -- self._configure_guest_auth(vc_mount, guest_mount, guest_id, mount_path) -+ self._configure_guest_auth(vc_mount, guest_mount, guest_id, mount_path, allow_existing_id=True) - - # Mount the volume in the guest using the auth ID to assert that the - # auth caps are valid --- -2.23.0 - diff --git a/0006-CVE-2021-3524-1.patch b/0006-CVE-2021-3524-1.patch deleted file mode 100644 index f3049837175d180319e52935fe3159b18285f330..0000000000000000000000000000000000000000 --- a/0006-CVE-2021-3524-1.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 763aebb94678018f89427137ffbc0c5205b1edc1 Mon Sep 17 00:00:00 2001 -From: Casey Bodley -Date: Tue, 4 May 2021 08:32:58 -0400 -Subject: [PATCH] rgw: sanitize \r in s3 CORSConfiguration's ExposeHeader - -follows up on 1524d3c0c5cb11775313ea1e2bb36a93257947f2 to escape \r as -well - -Fixes: CVE-2021-3524 - -Reported-by: Sergey Bobrov -Signed-off-by: Casey Bodley -(cherry picked from commit 87806f48e7a1b8891eb90711f1cedd26f1119aac) ---- - src/rgw/rgw_cors.cc | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - -diff --git a/src/rgw/rgw_cors.cc b/src/rgw/rgw_cors.cc -index 0b3e4f39455..bfe83d6420e 100644 ---- a/src/rgw/rgw_cors.cc -+++ b/src/rgw/rgw_cors.cc -@@ -148,8 +148,9 @@ void RGWCORSRule::format_exp_headers(string& s) { - if (s.length() > 0) - s.append(","); - // these values are sent to clients in a 'Access-Control-Expose-Headers' -- // response header, so we escape '\n' to avoid header injection -- boost::replace_all_copy(std::back_inserter(s), header, "\n", "\\n"); -+ // response header, so we escape '\n' and '\r' to avoid header injection -+ std::string tmp = 
boost::replace_all_copy(header, "\n", "\\n"); -+ boost::replace_all_copy(std::back_inserter(s), tmp, "\r", "\\r"); - } - } - --- -2.23.0 - diff --git a/ceph-14.2.15.tar.gz b/ceph-16.2.7.tar.gz similarity index 78% rename from ceph-14.2.15.tar.gz rename to ceph-16.2.7.tar.gz index 67b858f07bd7e4ddaa94f95c282f020b67bf256a..6449234f692c3c80efe16d43a14e1f6862cf7cfb 100644 Binary files a/ceph-14.2.15.tar.gz and b/ceph-16.2.7.tar.gz differ diff --git a/ceph.spec b/ceph.spec index 04488f7f9a05a6976fcfbca84b1d4411e48039b2..403fc0799f90ba8cf23ba687f0457f37112e100b 100644 --- a/ceph.spec +++ b/ceph.spec @@ -1,4 +1,3 @@ -# vim: set noexpandtab ts=8 sw=8 : # # spec file for package ceph # @@ -22,56 +21,76 @@ # bcond syntax! ################################################################################# %bcond_with make_check +%bcond_with zbd +%bcond_with cmake_verbose_logging %bcond_without ceph_test_package -%ifarch s390 s390x +%ifarch s390 %bcond_with tcmalloc %else %bcond_without tcmalloc %endif +%bcond_with system_pmdk %if 0%{?fedora} || 0%{?rhel} || 0%{?openEuler} %bcond_without selinux +%ifarch x86_64 ppc64le +%bcond_without rbd_rwl_cache +%bcond_without rbd_ssd_cache +%else +%bcond_with rbd_rwl_cache +%bcond_with rbd_ssd_cache +%endif %if 0%{?rhel} >= 8 || 0%{?openEuler} %bcond_with cephfs_java %else %bcond_without cephfs_java %endif %bcond_without amqp_endpoint +%bcond_without kafka_endpoint %bcond_without lttng %bcond_without libradosstriper %bcond_without ocf -%bcond_without kafka_endpoint +%global luarocks_package_name luarocks +%bcond_without lua_packages %global _remote_tarball_prefix https://download.ceph.com/tarballs/ %endif %if 0%{?suse_version} -%bcond_with selinux -%bcond_with cephfs_java %bcond_with amqp_endpoint +%bcond_with cephfs_java %bcond_with kafka_endpoint -#Compat macro for new _fillupdir macro introduced in Nov 2017 +%bcond_with libradosstriper +%ifarch x86_64 aarch64 ppc64le +%bcond_without lttng +%bcond_without rbd_rwl_cache +%bcond_without rbd_ssd_cache +%else +%bcond_with lttng +%bcond_with rbd_rwl_cache +%bcond_with rbd_ssd_cache +%endif +%bcond_with ocf +%bcond_with selinux +#Compat macro for _fillupdir macro introduced in Nov 2017 %if ! 
%{defined _fillupdir} %global _fillupdir /var/adm/fillup-templates %endif +#luarocks %if 0%{?is_opensuse} -%bcond_without libradosstriper -%bcond_without ocf +# openSUSE +%bcond_without lua_packages +%if 0%{?sle_version} +# openSUSE Leap +%global luarocks_package_name lua53-luarocks %else -%bcond_with libradosstriper -%bcond_with ocf +# openSUSE Tumbleweed +%global luarocks_package_name lua54-luarocks %endif -%ifarch x86_64 aarch64 ppc64le -%bcond_without lttng %else -%bcond_with lttng +# SLE +%bcond_with lua_packages %endif %endif %bcond_with seastar -%if 0%{?fedora} >= 29 || 0%{?suse_version} >= 1500 || 0%{?rhel} >= 8 || 0%{?openEuler} -# distros that need a py3 Ceph build -%bcond_with python3 -%else -# distros that need a py2 Ceph build -%bcond_without python2 -%endif +%bcond_with jaeger %if 0%{?fedora} || 0%{?suse_version} >= 1500 # distros that ship cmd2 and/or colorama %bcond_without cephfs_shell @@ -79,28 +98,24 @@ # distros that do _not_ ship cmd2/colorama %bcond_with cephfs_shell %endif -%if 0%{without python2} -%global _defined_if_python2_absent 1 -%endif %if 0%{?fedora} || 0%{?suse_version} || 0%{?rhel} >= 8 || 0%{?openEuler} %global weak_deps 1 %endif %if %{with selinux} # get selinux policy version +# Force 0.0.0 policy version for centos builds to avoid repository sync issues between rhel and centos +%if 0%{?centos} +%global _selinux_policy_version 0.0.0 +%else %{!?_selinux_policy_version: %global _selinux_policy_version 0.0.0} %endif +%endif %{!?_udevrulesdir: %global _udevrulesdir /lib/udev/rules.d} %{!?tmpfiles_create: %global tmpfiles_create systemd-tmpfiles --create} %{!?python3_pkgversion: %global python3_pkgversion 3} %{!?python3_version_nodots: %global python3_version_nodots 3} %{!?python3_version: %global python3_version 3} -# define _python_buildid macro which will expand to the empty string when -# building with python2 -%global _python_buildid %{python3_pkgversion} - -# unify libexec for all targets -%global _libexecdir %{_exec_prefix}/lib # disable dwz which compresses the debuginfo %global _find_debuginfo_dwz_opts %{nil} @@ -109,8 +124,8 @@ # main package definition ################################################################################# Name: ceph -Version: 14.2.15 -Release: 5%{?dist} +Version: 16.2.7 +Release: 0 %if 0%{?fedora} || 0%{?rhel} || 0%{?openEuler} Epoch: 2 %endif @@ -120,22 +135,16 @@ Epoch: 2 %global _epoch_prefix %{?epoch:%{epoch}:} Summary: User space components of the Ceph file system -License: LGPL-2.1 and CC-BY-SA-3.0 and GPL-2.0 and BSL-1.0 and BSD-3-Clause and MIT +License: LGPL-2.1-only and LGPL-3.0-only and CC-BY-SA-3.0 and GPL-2.0-only and GPL-2.0-or-later and BSL-1.0 and BSD-3-Clause and MIT %if 0%{?suse_version} Group: System/Filesystems %endif URL: http://ceph.com/ -Source0: %{?_remote_tarball_prefix}ceph-14.2.15.tar.gz +Source0: %{?_remote_tarball_prefix}ceph-16.2.7.tar.gz # backport -Patch0: 0001-cmake-detect-and-use-sigdescr_np-if-available.patch -# backport for cves -Patch1: 0001-CVE-2020-27781-1.patch -Patch2: 0002-CVE-2020-27781-2.patch -Patch3: 0003-CVE-2020-27781-3.patch -Patch4: 0004-CVE-2020-27781-4.patch -Patch5: 0005-CVE-2020-27781-5.patch -Patch6: 0006-CVE-2021-3524-1.patch +Patch0: 0001-fix-error-transform-is-not-a-member-of-std.patch +Patch1: 0002-enable-install-deps-in-openEuler.patch %if 0%{?suse_version} # _insert_obs_source_lines_here @@ -158,23 +167,20 @@ BuildRequires: checkpolicy BuildRequires: selinux-policy-devel %endif BuildRequires: gperf -%if 0%{?rhel} == 7 -BuildRequires: cmake3 > 3.5 -%else 
BuildRequires: cmake > 3.5 -%endif BuildRequires: cryptsetup BuildRequires: fuse-devel -%if 0%{?rhel} == 7 -# devtoolset offers newer make and valgrind-devel, but the old ones are good -# enough. -BuildRequires: devtoolset-8-gcc-c++ >= 8.2.1 +%if 0%{with seastar} +BuildRequires: gcc-toolset-9-gcc-c++ >= 9.2.1-2.3 %else BuildRequires: gcc-c++ %endif -BuildRequires: gdbm %if 0%{with tcmalloc} -%if 0%{?fedora} || 0%{?rhel} || 0%{?openEuler} +# libprofiler did not build on ppc64le until 2.7.90 +%if 0%{?fedora} || 0%{?rhel} >= 8 || 0%{?openEuler} +BuildRequires: gperftools-devel >= 2.7.90 +%endif +%if 0%{?rhel} && 0%{?rhel} < 8 BuildRequires: gperftools-devel >= 2.6.1 %endif %if 0%{?suse_version} @@ -184,8 +190,11 @@ BuildRequires: gperftools-devel >= 2.4 BuildRequires: leveldb-devel > 1.2 BuildRequires: libaio-devel BuildRequires: libblkid-devel >= 2.17 +BuildRequires: cryptsetup-devel BuildRequires: libcurl-devel BuildRequires: libcap-ng-devel +BuildRequires: fmt-devel +#BuildRequires: pkgconfig(libudev) BuildRequires: libudev-devel BuildRequires: libnl3-devel BuildRequires: liboath-devel @@ -193,40 +202,66 @@ BuildRequires: libtool BuildRequires: libxml2-devel BuildRequires: make BuildRequires: ncurses-devel +BuildRequires: libicu-devel BuildRequires: parted BuildRequires: patch BuildRequires: perl BuildRequires: pkgconfig BuildRequires: procps -BuildRequires: python%{_python_buildid} -BuildRequires: python%{_python_buildid}-devel +BuildRequires: python%{python3_pkgversion} +BuildRequires: python%{python3_pkgversion}-devel BuildRequires: snappy-devel +BuildRequires: sqlite-devel BuildRequires: sudo -BuildRequires: udev +BuildRequires: pkgconfig(udev) BuildRequires: util-linux BuildRequires: valgrind-devel BuildRequires: which BuildRequires: xfsprogs BuildRequires: xfsprogs-devel BuildRequires: xmlstarlet -BuildRequires: yasm +BuildRequires: nasm +BuildRequires: lua-devel %if 0%{with amqp_endpoint} BuildRequires: librabbitmq-devel %endif %if 0%{with kafka_endpoint} BuildRequires: librdkafka-devel %endif +%if 0%{with lua_packages} +BuildRequires: %{luarocks_package_name} +%endif %if 0%{with make_check} BuildRequires: jq BuildRequires: libuuid-devel -BuildRequires: python%{_python_buildid}-bcrypt -BuildRequires: python%{_python_buildid}-nose -BuildRequires: python%{_python_buildid}-pecan -BuildRequires: python%{_python_buildid}-requests -BuildRequires: python%{_python_buildid}-six -BuildRequires: python%{_python_buildid}-virtualenv +BuildRequires: python%{python3_pkgversion}-bcrypt +BuildRequires: python%{python3_pkgversion}-nose +BuildRequires: python%{python3_pkgversion}-pecan +BuildRequires: python%{python3_pkgversion}-requests +BuildRequires: python%{python3_pkgversion}-dateutil +BuildRequires: python%{python3_pkgversion}-coverage +BuildRequires: python%{python3_pkgversion}-pyOpenSSL BuildRequires: socat %endif +%if 0%{with zbd} +BuildRequires: libzbd-devel +%endif +%if 0%{with jaeger} +BuildRequires: bison +BuildRequires: flex +%if 0%{?fedora} || 0%{?rhel} || 0%{?openEuler} +BuildRequires: json-devel +%endif +%if 0%{?suse_version} +BuildRequires: nlohmann_json-devel +%endif +BuildRequires: libevent-devel +BuildRequires: yaml-cpp-devel +%endif +%if 0%{with system_pmdk} +BuildRequires: libpmem-devel +BuildRequires: libpmemobj-devel +%endif %if 0%{with seastar} BuildRequires: c-ares-devel BuildRequires: gnutls-devel @@ -237,6 +272,17 @@ BuildRequires: protobuf-devel BuildRequires: ragel BuildRequires: systemtap-sdt-devel BuildRequires: yaml-cpp-devel +%if 0%{?fedora} +BuildRequires: libubsan 
+BuildRequires: libasan +BuildRequires: libatomic +%endif +%if 0%{?rhel} +BuildRequires: gcc-toolset-9-annobin +BuildRequires: gcc-toolset-9-libubsan-devel +BuildRequires: gcc-toolset-9-libasan-devel +BuildRequires: gcc-toolset-9-libatomic-devel +%endif %endif ################################################################################# # distro-conditional dependencies @@ -252,15 +298,14 @@ BuildRequires: libbz2-devel BuildRequires: mozilla-nss-devel BuildRequires: keyutils-devel BuildRequires: libopenssl-devel -BuildRequires: lsb-release BuildRequires: openldap2-devel #BuildRequires: krb5 #BuildRequires: krb5-devel BuildRequires: cunit-devel -BuildRequires: python%{_python_buildid}-setuptools -BuildRequires: python%{_python_buildid}-Cython -BuildRequires: python%{_python_buildid}-PrettyTable -BuildRequires: python%{_python_buildid}-Sphinx +BuildRequires: python%{python3_pkgversion}-setuptools +BuildRequires: python%{python3_pkgversion}-Cython +BuildRequires: python%{python3_pkgversion}-PrettyTable +BuildRequires: python%{python3_pkgversion}-Sphinx BuildRequires: rdma-core-devel BuildRequires: liblz4-devel >= 1.7 # for prometheus-alerts @@ -277,48 +322,46 @@ BuildRequires: openldap-devel #BuildRequires: krb5-devel BuildRequires: openssl-devel BuildRequires: CUnit-devel -#BuildRequires: redhat-lsb-core BuildRequires: python%{python3_pkgversion}-devel BuildRequires: python%{python3_pkgversion}-setuptools -%if 0%{?rhel} == 7 -BuildRequires: python%{python3_version_nodots}-Cython -%else BuildRequires: python%{python3_pkgversion}-Cython -%endif -BuildRequires: python%{_python_buildid}-prettytable -BuildRequires: python%{_python_buildid}-sphinx +BuildRequires: python%{python3_pkgversion}-prettytable +BuildRequires: python%{python3_pkgversion}-sphinx BuildRequires: lz4-devel >= 1.7 %endif # distro-conditional make check dependencies %if 0%{with make_check} %if 0%{?fedora} || 0%{?rhel} || 0%{?openEuler} -BuildRequires: python%{_python_buildid}-coverage -BuildRequires: python%{_python_buildid}-pecan -BuildRequires: python%{_python_buildid}-tox -BuildRequires: xmlsec1 -%if 0%{?rhel} == 7 -BuildRequires: pyOpenSSL%{_python_buildid} -%else -BuildRequires: python%{_python_buildid}-pyOpenSSL -%endif -BuildRequires: python%{_python_buildid}-cherrypy -BuildRequires: python%{_python_buildid}-jwt -BuildRequires: python%{_python_buildid}-routes -BuildRequires: python%{_python_buildid}-scipy -BuildRequires: python%{_python_buildid}-werkzeug -%endif -%if 0%{?suse_version} -BuildRequires: python%{_python_buildid}-CherryPy -BuildRequires: python%{_python_buildid}-PyJWT -BuildRequires: python%{_python_buildid}-Routes -BuildRequires: python%{_python_buildid}-Werkzeug -BuildRequires: python%{_python_buildid}-coverage -BuildRequires: python%{_python_buildid}-numpy-devel -BuildRequires: python%{_python_buildid}-pecan -BuildRequires: python%{_python_buildid}-pyOpenSSL -BuildRequires: python%{_python_buildid}-tox -BuildRequires: rpm-build -BuildRequires: xmlsec1-devel +BuildRequires: golang-github-prometheus +BuildRequires: jsonnet +BuildRequires: libtool-ltdl-devel +BuildRequires: xmlsec1 +BuildRequires: xmlsec1-devel +%ifarch x86_64 +BuildRequires: xmlsec1-nss +%endif +BuildRequires: xmlsec1-openssl +BuildRequires: xmlsec1-openssl-devel +BuildRequires: python%{python3_pkgversion}-cherrypy +BuildRequires: python%{python3_pkgversion}-jwt +BuildRequires: python%{python3_pkgversion}-routes +BuildRequires: python%{python3_pkgversion}-scipy +BuildRequires: python%{python3_pkgversion}-werkzeug +BuildRequires: 
python%{python3_pkgversion}-pyOpenSSL +%endif +%if 0%{?suse_version} +BuildRequires: golang-github-prometheus-prometheus +BuildRequires: jsonnet +BuildRequires: libxmlsec1-1 +BuildRequires: libxmlsec1-nss1 +BuildRequires: libxmlsec1-openssl1 +BuildRequires: python%{python3_pkgversion}-CherryPy +BuildRequires: python%{python3_pkgversion}-PyJWT +BuildRequires: python%{python3_pkgversion}-Routes +BuildRequires: python%{python3_pkgversion}-Werkzeug +BuildRequires: python%{python3_pkgversion}-numpy-devel +BuildRequires: xmlsec1-devel +BuildRequires: xmlsec1-openssl-devel %endif %endif # lttng and babeltrace for rbd-replay-prep @@ -389,11 +432,11 @@ Requires: grep Requires: logrotate Requires: parted Requires: psmisc -Requires: python%{_python_buildid}-setuptools +Requires: python%{python3_pkgversion}-setuptools Requires: util-linux Requires: xfsprogs Requires: which -%if 0%{?fedora} || 0%{?rhel} || 0%{?openEuler} +%if 0%{?rhel} && 0%{?rhel} < 8 # The following is necessary due to tracker 36508 and can be removed once the # associated upstream bugs are resolved. %if 0%{with tcmalloc} @@ -402,10 +445,28 @@ Requires: gperftools-libs >= 2.6.1 %endif %if 0%{?weak_deps} Recommends: chrony +Recommends: nvme-cli +%if 0%{?suse_version} +Requires: smartmontools +%else +Recommends: smartmontools +%endif %endif %description base Base is the package that includes all the files shared amongst ceph servers +%package -n cephadm +Summary: Utility to bootstrap Ceph clusters +BuildArch: noarch +Requires: lvm2 +Requires: python%{python3_pkgversion} +%if 0%{?weak_deps} +Recommends: podman >= 2.0.2 +%endif +%description -n cephadm +Utility to bootstrap a Ceph cluster and manage Ceph daemons deployed +with systemd and podman. + %package -n ceph-common Summary: Ceph Common %if 0%{?suse_version} @@ -414,16 +475,20 @@ Group: System/Filesystems Requires: librbd1 = %{_epoch_prefix}%{version}-%{release} Requires: librados2 = %{_epoch_prefix}%{version}-%{release} Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release} -Requires: python%{_python_buildid}-rados = %{_epoch_prefix}%{version}-%{release} -Requires: python%{_python_buildid}-rbd = %{_epoch_prefix}%{version}-%{release} -Requires: python%{_python_buildid}-cephfs = %{_epoch_prefix}%{version}-%{release} -Requires: python%{_python_buildid}-rgw = %{_epoch_prefix}%{version}-%{release} -Requires: python%{_python_buildid}-ceph-argparse = %{_epoch_prefix}%{version}-%{release} +Requires: python%{python3_pkgversion}-rados = %{_epoch_prefix}%{version}-%{release} +Requires: python%{python3_pkgversion}-rbd = %{_epoch_prefix}%{version}-%{release} +Requires: python%{python3_pkgversion}-cephfs = %{_epoch_prefix}%{version}-%{release} +Requires: python%{python3_pkgversion}-rgw = %{_epoch_prefix}%{version}-%{release} +Requires: python%{python3_pkgversion}-ceph-argparse = %{_epoch_prefix}%{version}-%{release} +Requires: python%{python3_pkgversion}-ceph-common = %{_epoch_prefix}%{version}-%{release} +%if 0%{with jaeger} +Requires: libjaeger = %{_epoch_prefix}%{version}-%{release} +%endif %if 0%{?fedora} || 0%{?rhel} || 0%{?openEuler} -Requires: python%{_python_buildid}-prettytable +Requires: python%{python3_pkgversion}-prettytable %endif %if 0%{?suse_version} -Requires: python%{_python_buildid}-PrettyTable +Requires: python%{python3_pkgversion}-PrettyTable %endif %if 0%{with libradosstriper} Requires: libradosstriper1 = %{_epoch_prefix}%{version}-%{release} @@ -454,6 +519,9 @@ Group: System/Filesystems %endif Provides: ceph-test:/usr/bin/ceph-monstore-tool Requires: ceph-base = 
%{_epoch_prefix}%{version}-%{release} +%if 0%{with jaeger} +Requires: libjaeger = %{_epoch_prefix}%{version}-%{release} +%endif %description mon ceph-mon is the cluster monitor daemon for the Ceph distributed file system. One or more instances of ceph-mon form a Paxos part-time @@ -466,31 +534,14 @@ Summary: Ceph Manager Daemon Group: System/Filesystems %endif Requires: ceph-base = %{_epoch_prefix}%{version}-%{release} -Requires: python%{_python_buildid}-bcrypt -Requires: python%{_python_buildid}-pecan -Requires: python%{_python_buildid}-requests -Requires: python%{_python_buildid}-six -%if 0%{?fedora} || 0%{?rhel} || 0%{?openEuler} -Requires: python%{_python_buildid}-cherrypy -Requires: python%{_python_buildid}-werkzeug -%endif -%if 0%{?suse_version} -Requires: python%{_python_buildid}-CherryPy -Requires: python%{_python_buildid}-Werkzeug -%endif +Requires: ceph-mgr-modules-core = %{_epoch_prefix}%{version}-%{release} +Requires: libcephsqlite = %{_epoch_prefix}%{version}-%{release} %if 0%{?weak_deps} Recommends: ceph-mgr-dashboard = %{_epoch_prefix}%{version}-%{release} Recommends: ceph-mgr-diskprediction-local = %{_epoch_prefix}%{version}-%{release} -Recommends: ceph-mgr-diskprediction-cloud = %{_epoch_prefix}%{version}-%{release} -Recommends: ceph-mgr-rook = %{_epoch_prefix}%{version}-%{release} Recommends: ceph-mgr-k8sevents = %{_epoch_prefix}%{version}-%{release} -Recommends: ceph-mgr-ssh = %{_epoch_prefix}%{version}-%{release} -Recommends: python%{_python_buildid}-influxdb -%endif -%if 0%{?rhel} == 7 -Requires: pyOpenSSL -%else -Requires: python%{_python_buildid}-pyOpenSSL +Recommends: ceph-mgr-cephadm = %{_epoch_prefix}%{version}-%{release} +Recommends: python%{python3_pkgversion}-influxdb %endif %description mgr ceph-mgr enables python modules that provide services (such as the REST @@ -498,35 +549,126 @@ module derived from Calamari) and expose CLI hooks. ceph-mgr gathers the cluster maps, the daemon metadata, and performance counters, and exposes all these to the python modules. -%package mgr-diskprediction-local -Summary: ceph-mgr diskprediction_local plugin +%package mgr-dashboard +Summary: Ceph Dashboard BuildArch: noarch %if 0%{?suse_version} Group: System/Filesystems %endif Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release} -%if 0%{?fedora} || 0%{?rhel} > 7 || 0%{?suse_version} || 0%{?openEuler} -Requires: python%{_python_buildid}-numpy -Requires: python3-scipy +Requires: ceph-grafana-dashboards = %{_epoch_prefix}%{version}-%{release} +Requires: ceph-prometheus-alerts = %{_epoch_prefix}%{version}-%{release} +%if 0%{?fedora} || 0%{?rhel} || 0%{?openEuler} +Requires: python%{python3_pkgversion}-cherrypy +Requires: python%{python3_pkgversion}-jwt +Requires: python%{python3_pkgversion}-routes +Requires: python%{python3_pkgversion}-werkzeug +%if 0%{?weak_deps} +Recommends: python%{python3_pkgversion}-saml +%endif +%endif +%if 0%{?suse_version} +Requires: python%{python3_pkgversion}-CherryPy +Requires: python%{python3_pkgversion}-PyJWT +Requires: python%{python3_pkgversion}-Routes +Requires: python%{python3_pkgversion}-Werkzeug +Recommends: python%{python3_pkgversion}-python3-saml +%endif +%description mgr-dashboard +ceph-mgr-dashboard is a manager module, providing a web-based application +to monitor and manage many aspects of a Ceph cluster and related components. +See the Dashboard documentation at http://docs.ceph.com/ for details and a +detailed feature overview. 
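+# Editor's illustrative aside, not part of the upstream ceph.spec: once this
+# package is installed, the dashboard is typically activated through the mgr
+# module mechanism. A minimal sketch, assuming a running cluster; the "admin"
+# user name and password file path are placeholders:
+#   ceph mgr module enable dashboard
+#   ceph dashboard create-self-signed-cert
+#   ceph dashboard ac-user-create admin -i /tmp/dashboard-pass.txt administrator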
+ +%package mgr-diskprediction-local +Summary: Ceph Manager module for predicting disk failures +BuildArch: noarch +%if 0%{?suse_version} +Group: System/Filesystems %endif -%if 0%{?rhel} == 7 -Requires: numpy -Requires: scipy +Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release} +Requires: python%{python3_pkgversion}-numpy +%if 0%{?fedora} || 0%{?suse_version} +Requires: python%{python3_pkgversion}-scikit-learn %endif +Requires: python3-scipy %description mgr-diskprediction-local -ceph-mgr-diskprediction-local is a ceph-mgr plugin that tries to predict +ceph-mgr-diskprediction-local is a ceph-mgr module that tries to predict disk failures using local algorithms and machine-learning databases. -%package mgr-diskprediction-cloud -Summary: ceph-mgr diskprediction_cloud plugin +%package mgr-modules-core +Summary: Ceph Manager modules which are always enabled +BuildArch: noarch +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: python%{python3_pkgversion}-bcrypt +Requires: python%{python3_pkgversion}-pecan +Requires: python%{python3_pkgversion}-pyOpenSSL +Requires: python%{python3_pkgversion}-requests +Requires: python%{python3_pkgversion}-dateutil +%if 0%{?fedora} || 0%{?rhel} >= 8 || 0%{?openEuler} +Requires: python%{python3_pkgversion}-cherrypy +Requires: python%{python3_pkgversion}-pyyaml +Requires: python%{python3_pkgversion}-werkzeug +%endif +%if 0%{?suse_version} +Requires: python%{python3_pkgversion}-CherryPy +Requires: python%{python3_pkgversion}-PyYAML +Requires: python%{python3_pkgversion}-Werkzeug +%endif +%if 0%{?weak_deps} +Recommends: ceph-mgr-rook = %{_epoch_prefix}%{version}-%{release} +%endif +%description mgr-modules-core +ceph-mgr-modules-core provides a set of modules which are always +enabled by ceph-mgr. + +%package mgr-rook +BuildArch: noarch +Summary: Ceph Manager module for Rook-based orchestration +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release} +Requires: python%{python3_pkgversion}-kubernetes +Requires: python%{python3_pkgversion}-jsonpatch +%description mgr-rook +ceph-mgr-rook is a ceph-mgr module for orchestration functions using +a Rook backend. + +%package mgr-k8sevents BuildArch: noarch +Summary: Ceph Manager module to orchestrate ceph-events to kubernetes' events API +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release} +Requires: python%{python3_pkgversion}-kubernetes +%description mgr-k8sevents +ceph-mgr-k8sevents is a ceph-mgr module that sends every ceph-events +to kubernetes' events API + +%package mgr-cephadm +Summary: Ceph Manager module for cephadm-based orchestration +BuildArch: noarch %if 0%{?suse_version} Group: System/Filesystems %endif Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release} -%description mgr-diskprediction-cloud -ceph-mgr-diskprediction-cloud is a ceph-mgr plugin that tries to predict -disk failures using services in the Google cloud. +Requires: python%{python3_pkgversion}-remoto +Requires: cephadm = %{_epoch_prefix}%{version}-%{release} +%if 0%{?suse_version} +Requires: openssh +Requires: python%{python3_pkgversion}-Jinja2 +%endif +%if 0%{?rhel} || 0%{?fedora} || 0%{?openEuler} +Requires: openssh-clients +Requires: python%{python3_pkgversion}-jinja2 +%endif +%description mgr-cephadm +ceph-mgr-cephadm is a ceph-mgr module for orchestration functions using +the integrated cephadm deployment tool management operations. 
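+# Editor's illustrative aside, not part of the upstream ceph.spec: this package
+# supplies the backend behind the "ceph orch" CLI. A minimal sketch of enabling
+# it, assuming an already bootstrapped cluster:
+#   ceph mgr module enable cephadm
+#   ceph orch set backend cephadm
+#   ceph orch status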
%package fuse Summary: Ceph fuse-based client @@ -538,6 +680,17 @@ Requires: python%{python3_pkgversion} %description fuse FUSE based client for Ceph distributed network file system +%package -n cephfs-mirror +Summary: Ceph daemon for mirroring CephFS snapshots +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: ceph-base = %{_epoch_prefix}%{version}-%{release} +Requires: librados2 = %{_epoch_prefix}%{version}-%{release} +Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release} +%description -n cephfs-mirror +Daemon for mirroring CephFS snapshots between Ceph clusters. + %package -n rbd-fuse Summary: Ceph fuse-based client %if 0%{?suse_version} @@ -560,6 +713,16 @@ Requires: librbd1 = %{_epoch_prefix}%{version}-%{release} Daemon for mirroring RBD images between Ceph clusters, streaming changes asynchronously. +%package immutable-object-cache +Summary: Ceph daemon for immutable object cache +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: ceph-base = %{_epoch_prefix}%{version}-%{release} +Requires: librados2 = %{_epoch_prefix}%{version}-%{release} +%description immutable-object-cache +Daemon for immutable object cache. + %package -n rbd-nbd Summary: Ceph RBD client base on NBD %if 0%{?suse_version} @@ -584,12 +747,23 @@ Requires: librgw2 = %{_epoch_prefix}%{version}-%{release} %if 0%{?rhel} || 0%{?fedora} || 0%{?openEuler} Requires: mailcap %endif +%if 0%{?weak_deps} +Recommends: gawk +%endif %description radosgw RADOS is a distributed object store used by the Ceph distributed storage system. This package provides a REST gateway to the object store that aims to implement a superset of Amazon's S3 service as well as the OpenStack Object Storage ("Swift") API. +%package -n cephfs-top +Summary: top(1) like utility for Ceph Filesystem +BuildArch: noarch +Requires: python%{python3_pkgversion}-rados +%description -n cephfs-top +This package provides a top(1) like utility to display Ceph Filesystem metrics +in realtime. + %if %{with ocf} %package resource-agents Summary: OCF-compliant resource agents for Ceph daemons @@ -613,12 +787,26 @@ Provides: ceph-test:/usr/bin/ceph-osdomap-tool Requires: ceph-base = %{_epoch_prefix}%{version}-%{release} Requires: lvm2 Requires: sudo -Requires: libstoragemgmt +Requires: libstoragemgmt +Requires: python%{python3_pkgversion}-ceph-common = %{_epoch_prefix}%{version}-%{release} %description osd ceph-osd is the object storage daemon for the Ceph distributed file system. It is responsible for storing objects on a local file system and providing access to them over the network. +%if 0%{with seastar} +%package crimson-osd +Summary: Ceph Object Storage Daemon (crimson) +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: ceph-osd = %{_epoch_prefix}%{version}-%{release} +%description crimson-osd +crimson-osd is the object storage daemon for the Ceph distributed file +system. It is responsible for storing objects on a local file system +and providing access to them over the network. 
+%endif + %package -n librados2 Summary: RADOS distributed object store client library %if 0%{?suse_version} @@ -688,8 +876,10 @@ Group: Development/Libraries/Python Requires: librgw2 = %{_epoch_prefix}%{version}-%{release} Requires: python%{python3_pkgversion}-rados = %{_epoch_prefix}%{version}-%{release} %{?python_provide:%python_provide python%{python3_pkgversion}-rgw} +Provides: python-rgw = %{_epoch_prefix}%{version}-%{release} +Obsoletes: python-rgw < %{_epoch_prefix}%{version}-%{release} %description -n python%{python3_pkgversion}-rgw -This package contains Python 3 libraries for interacting with Cephs RADOS +This package contains Python 3 libraries for interacting with Ceph RADOS gateway. %package -n python%{python3_pkgversion}-rados @@ -700,10 +890,38 @@ Group: Development/Libraries/Python Requires: python%{python3_pkgversion} Requires: librados2 = %{_epoch_prefix}%{version}-%{release} %{?python_provide:%python_provide python%{python3_pkgversion}-rados} +Provides: python-rados = %{_epoch_prefix}%{version}-%{release} +Obsoletes: python-rados < %{_epoch_prefix}%{version}-%{release} %description -n python%{python3_pkgversion}-rados -This package contains Python 3 libraries for interacting with Cephs RADOS +This package contains Python 3 libraries for interacting with Ceph RADOS object store. +%package -n libcephsqlite +Summary: SQLite3 VFS for Ceph +%if 0%{?suse_version} +Group: System/Libraries +%endif +Requires: librados2 = %{_epoch_prefix}%{version}-%{release} +%description -n libcephsqlite +A SQLite3 VFS for storing and manipulating databases stored on Ceph's RADOS +distributed object store. + +%package -n libcephsqlite-devel +Summary: SQLite3 VFS for Ceph headers +%if 0%{?suse_version} +Group: Development/Libraries/C and C++ +%endif +Requires: sqlite-devel +Requires: libcephsqlite = %{_epoch_prefix}%{version}-%{release} +Requires: librados-devel = %{_epoch_prefix}%{version}-%{release} +Requires: libradospp-devel = %{_epoch_prefix}%{version}-%{release} +Obsoletes: ceph-devel < %{_epoch_prefix}%{version}-%{release} +Provides: libcephsqlite-devel = %{_epoch_prefix}%{version}-%{release} +Obsoletes: libcephsqlite-devel < %{_epoch_prefix}%{version}-%{release} +%description -n libcephsqlite-devel +A SQLite3 VFS for storing and manipulating databases stored on Ceph's RADOS +distributed object store. + %if 0%{with libradosstriper} %package -n libradosstriper1 Summary: RADOS striping interface @@ -773,9 +991,10 @@ Group: Development/Libraries/Python Requires: librbd1 = %{_epoch_prefix}%{version}-%{release} Requires: python%{python3_pkgversion}-rados = %{_epoch_prefix}%{version}-%{release} %{?python_provide:%python_provide python%{python3_pkgversion}-rbd} -Provides: python3-rbd = %{_epoch_prefix}%{version}-%{release} +Provides: python-rbd = %{_epoch_prefix}%{version}-%{release} +Obsoletes: python-rbd < %{_epoch_prefix}%{version}-%{release} %description -n python%{python3_pkgversion}-rbd -This package contains Python 3 libraries for interacting with Cephs RADOS +This package contains Python 3 libraries for interacting with Ceph RADOS block device. %package -n libcephfs2 @@ -806,7 +1025,21 @@ Provides: libcephfs2-devel = %{_epoch_prefix}%{version}-%{release} Obsoletes: libcephfs2-devel < %{_epoch_prefix}%{version}-%{release} %description -n libcephfs-devel This package contains libraries and headers needed to develop programs -that use Cephs distributed file system. +that use Ceph distributed file system. 
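+# Editor's illustrative aside, not part of the upstream ceph.spec: a minimal
+# libcephfs consumer built against this -devel package; the file name "demo.c"
+# and build line are hypothetical:
+#   cc demo.c -o demo -lcephfs
+# where demo.c includes <cephfs/libcephfs.h> and calls, in order,
+#   ceph_create(&cmount, NULL); ceph_conf_read_file(cmount, NULL);
+#   ceph_mount(cmount, "/"); /* ... use the mount ... */
+#   ceph_unmount(cmount); ceph_release(cmount);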
+
+%if 0%{with jaeger}
+%package -n libjaeger
+Summary: Ceph distributed file system tracing library
+%if 0%{?suse_version}
+Group: System/Libraries
+%endif
+Provides: libjaegertracing.so.0()(64bit)
+Provides: libopentracing.so.1()(64bit)
+Provides: libthrift.so.0.13.0()(64bit)
+%description -n libjaeger
+This package contains libraries needed to provide distributed
+tracing for Ceph.
+%endif
 
 %package -n python%{python3_pkgversion}-cephfs
 Summary: Python 3 libraries for Ceph distributed file system
@@ -817,8 +1050,10 @@ Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release}
 Requires: python%{python3_pkgversion}-rados = %{_epoch_prefix}%{version}-%{release}
 Requires: python%{python3_pkgversion}-ceph-argparse = %{_epoch_prefix}%{version}-%{release}
 %{?python_provide:%python_provide python%{python3_pkgversion}-cephfs}
+Provides: python-cephfs = %{_epoch_prefix}%{version}-%{release}
+Obsoletes: python-cephfs < %{_epoch_prefix}%{version}-%{release}
 %description -n python%{python3_pkgversion}-cephfs
-This package contains Python 3 libraries for interacting with Cephs distributed
+This package contains Python 3 libraries for interacting with Ceph distributed
 file system.
 
 %package -n python%{python3_pkgversion}-ceph-argparse
@@ -833,6 +1068,22 @@ well as the RESTful interface. These have to do with querying the daemons for
 command-description information, validating user command input against those
 descriptions, and submitting the command to the appropriate daemon.
 
+%package -n python%{python3_pkgversion}-ceph-common
+Summary: Python 3 utility libraries for Ceph
+%if 0%{?fedora} || 0%{?rhel} >= 8 || 0%{?openEuler}
+Requires: python%{python3_pkgversion}-pyyaml
+%endif
+%if 0%{?suse_version}
+Requires: python%{python3_pkgversion}-PyYAML
+%endif
+%if 0%{?suse_version}
+Group: Development/Libraries/Python
+%endif
+%{?python_provide:%python_provide python%{python3_pkgversion}-ceph-common}
+%description -n python%{python3_pkgversion}-ceph-common
+This package contains data structures, classes and functions used by Ceph.
+It also contains utilities used for the cephadm orchestrator.
+
 %if 0%{with cephfs_shell}
 %package -n cephfs-shell
 Summary: Interactive shell for Ceph file system
@@ -942,28 +1193,28 @@ collecting data from Ceph Manager "prometheus" module and Prometheus
 project "node_exporter" module. The dashboards are designed to be integrated
 with the Ceph Manager Dashboard web UI.
 
-%if 0%{?suse_version}
 %package prometheus-alerts
-Summary: Prometheus alerts for a Ceph deplyoment
+Summary: Prometheus alerts for a Ceph deployment
 BuildArch: noarch
 Group: System/Monitoring
 %description prometheus-alerts
-This package provides Ceph’s default alerts for Prometheus.
-%endif
+This package provides Ceph default alerts for Prometheus.
 
 #################################################################################
 # common
 #################################################################################
 %prep
-%autosetup -p1 -n ceph-14.2.15
+%autosetup -p1 -n ceph-16.2.7
 
 %build
 # LTO can be enabled as soon as the following GCC bug is fixed:
 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=48200
 %define _lto_cflags %{nil}
 
-%if 0%{?rhel} == 7
-. /opt/rh/devtoolset-8/enable
+%define _binaries_in_noarch_packages_terminate_build 0
+
+%if 0%{with seastar} && 0%{?rhel}
+. /opt/rh/gcc-toolset-9/enable
 %endif
 
 %if 0%{with cephfs_java}
@@ -983,8 +1234,13 @@ export CFLAGS="$RPM_OPT_FLAGS"
 export CXXFLAGS="$RPM_OPT_FLAGS"
 export LDFLAGS="$RPM_LD_FLAGS"
 
+%if 0%{with seastar}
+# seastar uses longjmp() to implement coroutines, and this annoys longjmp_chk()
+export CXXFLAGS=$(echo $RPM_OPT_FLAGS | sed -e 's/-Wp,-D_FORTIFY_SOURCE=2//g')
+%endif
+
 # Parallel build settings ...
-CEPH_MFLAGS_JOBS="%{?_smp_mflags}"
+CEPH_MFLAGS_JOBS="-j32"
 CEPH_SMP_NCPUS=$(echo "$CEPH_MFLAGS_JOBS" | sed 's/-j//')
 %if 0%{?__isa_bits} == 32
 # 32-bit builds can use 3G memory max, which is not enough even for -j2
@@ -1009,11 +1265,7 @@ env | sort
 mkdir build
 cd build
 
-%if 0%{?rhel} == 7
-CMAKE=cmake3
-%else
 CMAKE=cmake
-%endif
 ${CMAKE} .. \
     -DCMAKE_INSTALL_PREFIX=%{_prefix} \
     -DCMAKE_INSTALL_LIBDIR=%{_libdir} \
@@ -1023,11 +1275,10 @@ ${CMAKE} .. \
     -DCMAKE_INSTALL_MANDIR=%{_mandir} \
     -DCMAKE_INSTALL_DOCDIR=%{_docdir}/ceph \
     -DCMAKE_INSTALL_INCLUDEDIR=%{_includedir} \
+    -DCMAKE_INSTALL_SYSTEMD_SERVICEDIR=%{_unitdir} \
     -DWITH_MANPAGE=ON \
     -DWITH_PYTHON3=%{python3_version} \
     -DWITH_MGR_DASHBOARD_FRONTEND=OFF \
-    -DWITH_PYTHON2=OFF \
-    -DMGR_PYTHON_VERSION=3 \
 %if 0%{without ceph_test_package}
     -DWITH_TESTS=OFF \
 %endif
@@ -1048,11 +1299,6 @@ ${CMAKE} .. \
 %if 0%{with ocf}
     -DWITH_OCF=ON \
 %endif
-%ifarch aarch64 armv7hl mips mipsel ppc ppc64 ppc64le %{ix86} x86_64
-    -DWITH_BOOST_CONTEXT=ON \
-%else
-    -DWITH_BOOST_CONTEXT=OFF \
-%endif
 %if 0%{with cephfs_shell}
     -DWITH_CEPHFS_SHELL=ON \
 %endif
@@ -1070,11 +1316,37 @@ ${CMAKE} .. \
     -DWITH_RADOSGW_KAFKA_ENDPOINT=ON \
 %else
     -DWITH_RADOSGW_KAFKA_ENDPOINT=OFF \
+%endif
+%if 0%{without lua_packages}
+    -DWITH_RADOSGW_LUA_PACKAGES=OFF \
+%endif
+%if 0%{with zbd}
+    -DWITH_ZBD=ON \
+%endif
+%if 0%{with cmake_verbose_logging}
+    -DCMAKE_VERBOSE_MAKEFILE=ON \
+%endif
+%if 0%{with rbd_rwl_cache}
+    -DWITH_RBD_RWL=ON \
+%endif
+%if 0%{with rbd_ssd_cache}
+    -DWITH_RBD_SSD_CACHE=ON \
+%endif
+%if 0%{with system_pmdk}
+    -DWITH_SYSTEM_PMDK:BOOL=ON \
 %endif
     -DBOOST_J=$CEPH_SMP_NCPUS \
+%if 0%{?rhel}
+    -DWITH_FMT_HEADER_ONLY:BOOL=ON \
+%endif
     -DWITH_GRAFANA=ON
 
-taskset -c 0-31 make "$CEPH_MFLAGS_JOBS"
+%if %{with cmake_verbose_logging}
+cat ./CMakeFiles/CMakeOutput.log
+cat ./CMakeFiles/CMakeError.log
+%endif
+
+make "$CEPH_MFLAGS_JOBS"
 
 %if 0%{with make_check}
@@ -1091,6 +1363,12 @@ make DESTDIR=%{buildroot} install
 # we have dropped sysvinit bits
 rm -f %{buildroot}/%{_sysconfdir}/init.d/ceph
 popd
+
+%if 0%{with seastar}
+# package crimson-osd with the name of ceph-osd
+install -m 0755 %{buildroot}%{_bindir}/crimson-osd %{buildroot}%{_bindir}/ceph-osd
+%endif
+
 install -m 0644 -D src/etc-rbdmap %{buildroot}%{_sysconfdir}/ceph/rbdmap
 %if 0%{?fedora} || 0%{?rhel} || 0%{?openEuler}
 install -m 0644 -D etc/sysconfig/ceph %{buildroot}%{_sysconfdir}/sysconfig/ceph
@@ -1099,15 +1377,24 @@ install -m 0644 -D etc/sysconfig/ceph %{buildroot}%{_sysconfdir}/sysconfig/ceph
 install -m 0644 -D etc/sysconfig/ceph %{buildroot}%{_fillupdir}/sysconfig.%{name}
 %endif
 install -m 0644 -D systemd/ceph.tmpfiles.d %{buildroot}%{_tmpfilesdir}/ceph-common.conf
-install -m 0644 -D systemd/50-ceph.preset %{buildroot}%{_libexecdir}/systemd/system-preset/50-ceph.preset
+install -m 0644 -D systemd/50-ceph.preset %{buildroot}%{_presetdir}/50-ceph.preset
 mkdir -p %{buildroot}%{_sbindir}
 install -m 0644 -D src/logrotate.conf %{buildroot}%{_sysconfdir}/logrotate.d/ceph
 chmod 0644 %{buildroot}%{_docdir}/ceph/sample.ceph.conf
 install -m 0644 -D COPYING %{buildroot}%{_docdir}/ceph/COPYING
 install -m 0644 -D etc/sysctl/90-ceph-osd.conf %{buildroot}%{_sysctldir}/90-ceph-osd.conf
+install -m 0755 -D src/tools/rbd_nbd/rbd-nbd_quiesce %{buildroot}%{_libexecdir}/rbd-nbd/rbd-nbd_quiesce
+
+install -m 0755 src/cephadm/cephadm %{buildroot}%{_sbindir}/cephadm
+mkdir -p %{buildroot}%{_sharedstatedir}/cephadm
+chmod 0700 %{buildroot}%{_sharedstatedir}/cephadm
+mkdir -p %{buildroot}%{_sharedstatedir}/cephadm/.ssh
+chmod 0700 %{buildroot}%{_sharedstatedir}/cephadm/.ssh
+touch %{buildroot}%{_sharedstatedir}/cephadm/.ssh/authorized_keys
+chmod 0600 %{buildroot}%{_sharedstatedir}/cephadm/.ssh/authorized_keys
 
 # firewall templates and /sbin/mount.ceph symlink
-%if 0%{?suse_version}
+%if 0%{?suse_version} && !0%{?usrmerged}
 mkdir -p %{buildroot}/sbin
 ln -sf %{_sbindir}/mount.ceph %{buildroot}/sbin/mount.ceph
 %endif
@@ -1116,7 +1403,7 @@ ln -sf %{_sbindir}/mount.ceph %{buildroot}/sbin/mount.ceph
 install -m 0644 -D udev/50-rbd.rules %{buildroot}%{_udevrulesdir}/50-rbd.rules
 
 # sudoers.d
-install -m 0600 -D sudoers.d/ceph-osd-smartctl %{buildroot}%{_sysconfdir}/sudoers.d/ceph-osd-smartctl
+install -m 0440 -D sudoers.d/ceph-smartctl %{buildroot}%{_sysconfdir}/sudoers.d/ceph-smartctl
 
 %if 0%{?rhel} >= 8 || 0%{?openEuler}
 pathfix.py -pni "%{__python3} %{py3_shbang_opts}" %{buildroot}%{_bindir}/*
@@ -1142,11 +1429,12 @@ mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-mgr
 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-rbd
 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-rbd-mirror
 
+# prometheus alerts
+install -m 644 -D monitoring/prometheus/alerts/ceph_default_alerts.yml %{buildroot}/etc/prometheus/ceph/ceph_default_alerts.yml
+
 %if 0%{?suse_version}
 # create __pycache__ directories and their contents
 %py3_compile %{buildroot}%{python3_sitelib}
-# prometheus alerts
-install -m 644 -D monitoring/prometheus/alerts/ceph_default_alerts.yml %{buildroot}/etc/prometheus/SUSE/default_rules/ceph_default_alerts.yml
 # hardlink duplicate files under /usr to save space
 %fdupes %{buildroot}%{_prefix}
 %endif
@@ -1170,7 +1458,7 @@ rm -rf %{buildroot}
 %{_bindir}/osdmaptool
 %{_bindir}/ceph-kvstore-tool
 %{_bindir}/ceph-run
-%{_libexecdir}/systemd/system-preset/50-ceph.preset
+%{_presetdir}/50-ceph.preset
 %{_sbindir}/ceph-create-keys
 %dir %{_libexecdir}/ceph
 %{_libexecdir}/ceph/ceph_common.sh
@@ -1216,6 +1504,7 @@ rm -rf %{buildroot}
 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-mgr
 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rbd
 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rbd-mirror
+%{_sysconfdir}/sudoers.d/ceph-smartctl
 
 %post base
 /sbin/ldconfig
@@ -1242,21 +1531,25 @@ fi
 
 %postun base
 /sbin/ldconfig
-%if 0%{?suse_version}
-DISABLE_RESTART_ON_UPDATE="yes"
-%service_del_postun ceph.target
-%endif
-%if 0%{?fedora} || 0%{?rhel} || 0%{?openEuler}
 %systemd_postun ceph.target
+
+%pre -n cephadm
+getent group cephadm >/dev/null || groupadd -r cephadm
+getent passwd cephadm >/dev/null || useradd -r -g cephadm -s /bin/bash -c "cephadm user for mgr/cephadm" -d %{_sharedstatedir}/cephadm cephadm
+exit 0
+
+%if ! 0%{?suse_version}
+%postun -n cephadm
+userdel -r cephadm || true
+exit 0
 %endif
-if [ $1 -ge 1 ] ; then
-  # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
-  # "yes". In any case: if units are not running, do not touch them.
-  SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
-  if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
-    source $SYSCONF_CEPH
-  fi
-fi
+
+%files -n cephadm
+%{_sbindir}/cephadm
+%{_mandir}/man8/cephadm.8*
+%attr(0700,cephadm,cephadm) %dir %{_sharedstatedir}/cephadm
+%attr(0700,cephadm,cephadm) %dir %{_sharedstatedir}/cephadm/.ssh
+%attr(0600,cephadm,cephadm) %{_sharedstatedir}/cephadm/.ssh/authorized_keys
 
 %files common
 %dir %{_docdir}/ceph
@@ -1278,7 +1571,7 @@ fi
 %{_bindir}/rbd-replay-many
 %{_bindir}/rbdmap
 %{_sbindir}/mount.ceph
-%if 0%{?suse_version}
+%if 0%{?suse_version} && !0%{?usrmerged}
 /sbin/mount.ceph
 %endif
 %if %{with lttng}
@@ -1384,13 +1677,7 @@ fi
 %endif
 
 %postun mds
-%if 0%{?suse_version}
-DISABLE_RESTART_ON_UPDATE="yes"
-%service_del_postun ceph-mds@\*.service ceph-mds.target
-%endif
-%if 0%{?fedora} || 0%{?rhel} || 0%{?openEuler}
 %systemd_postun ceph-mds@\*.service ceph-mds.target
-%endif
 if [ $1 -ge 1 ] ; then
   # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
  # "yes". In any case: if units are not running, do not touch them.
@@ -1406,40 +1693,11 @@ fi
 %files mgr
 %{_bindir}/ceph-mgr
 %dir %{_datadir}/ceph/mgr
-%{_datadir}/ceph/mgr/alerts
-%{_datadir}/ceph/mgr/ansible
-%{_datadir}/ceph/mgr/balancer
-%{_datadir}/ceph/mgr/crash
-%{_datadir}/ceph/mgr/deepsea
-%{_datadir}/ceph/mgr/devicehealth
-%{_datadir}/ceph/mgr/influx
-%{_datadir}/ceph/mgr/insights
-%{_datadir}/ceph/mgr/iostat
-%{_datadir}/ceph/mgr/localpool
 %{_datadir}/ceph/mgr/mgr_module.*
 %{_datadir}/ceph/mgr/mgr_util.*
-%{_datadir}/ceph/mgr/orchestrator_cli
-%{_datadir}/ceph/mgr/orchestrator.*
-%{_datadir}/ceph/mgr/osd_perf_query
-%{_datadir}/ceph/mgr/pg_autoscaler
-%{_datadir}/ceph/mgr/progress
-%{_datadir}/ceph/mgr/prometheus
-%{_datadir}/ceph/mgr/rbd_support
-%{_datadir}/ceph/mgr/restful
-%{_datadir}/ceph/mgr/selftest
-%{_datadir}/ceph/mgr/status
-%{_datadir}/ceph/mgr/telegraf
-%{_datadir}/ceph/mgr/telemetry
-%{_datadir}/ceph/mgr/test_orchestrator
-%{_datadir}/ceph/mgr/volumes
-%{_datadir}/ceph/mgr/zabbix
 %{_unitdir}/ceph-mgr@.service
 %{_unitdir}/ceph-mgr.target
 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/mgr
-%exclude %{_datadir}/ceph/mgr/rook
-%exclude %{_datadir}/ceph/mgr/k8sevents
-%exclude %{_datadir}/ceph/mgr/dashboard
-%exclude %{_datadir}/ceph/mgr/ssh
 
 %post mgr
 %if 0%{?suse_version}
@@ -1463,13 +1721,7 @@ fi
 %endif
 
 %postun mgr
-%if 0%{?suse_version}
-DISABLE_RESTART_ON_UPDATE="yes"
-%service_del_postun ceph-mgr@\*.service ceph-mgr.target
-%endif
-%if 0%{?fedora} || 0%{?rhel} || 0%{?openEuler}
 %systemd_postun ceph-mgr@\*.service ceph-mgr.target
-%endif
 if [ $1 -ge 1 ] ; then
   # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
   # "yes". In any case: if units are not running, do not touch them.
@@ -1482,6 +1734,19 @@ if [ $1 -ge 1 ] ; then
   fi
 fi
 
+%files mgr-dashboard
+%{_datadir}/ceph/mgr/dashboard
+
+%post mgr-dashboard
+if [ $1 -eq 1 ] ; then
+  /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
+fi
+
+%postun mgr-dashboard
+if [ $1 -eq 1 ] ; then
+  /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
+fi
+
 %files mgr-diskprediction-local
 %{_datadir}/ceph/mgr/diskprediction_local
 
@@ -1495,15 +1760,72 @@ if [ $1 -eq 1 ] ; then
   /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
 fi
 
-%files mgr-diskprediction-cloud
-%{_datadir}/ceph/mgr/diskprediction_cloud
+%files mgr-modules-core
+%dir %{_datadir}/ceph/mgr
+%{_datadir}/ceph/mgr/alerts
+%{_datadir}/ceph/mgr/balancer
+%{_datadir}/ceph/mgr/crash
+%{_datadir}/ceph/mgr/devicehealth
+%{_datadir}/ceph/mgr/influx
+%{_datadir}/ceph/mgr/insights
+%{_datadir}/ceph/mgr/iostat
+%{_datadir}/ceph/mgr/localpool
+%{_datadir}/ceph/mgr/mds_autoscaler
+%{_datadir}/ceph/mgr/mirroring
+%{_datadir}/ceph/mgr/nfs
+%{_datadir}/ceph/mgr/orchestrator
+%{_datadir}/ceph/mgr/osd_perf_query
+%{_datadir}/ceph/mgr/osd_support
+%{_datadir}/ceph/mgr/pg_autoscaler
+%{_datadir}/ceph/mgr/progress
+%{_datadir}/ceph/mgr/prometheus
+%{_datadir}/ceph/mgr/rbd_support
+%{_datadir}/ceph/mgr/restful
+%{_datadir}/ceph/mgr/selftest
+%{_datadir}/ceph/mgr/snap_schedule
+%{_datadir}/ceph/mgr/stats
+%{_datadir}/ceph/mgr/status
+%{_datadir}/ceph/mgr/telegraf
+%{_datadir}/ceph/mgr/telemetry
+%{_datadir}/ceph/mgr/test_orchestrator
+%{_datadir}/ceph/mgr/volumes
+%{_datadir}/ceph/mgr/zabbix
+
+%files mgr-rook
+%{_datadir}/ceph/mgr/rook
+
+%post mgr-rook
+if [ $1 -eq 1 ] ; then
+  /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
+fi
+
+%postun mgr-rook
+if [ $1 -eq 1 ] ; then
+  /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
+fi
+
+%files mgr-k8sevents
+%{_datadir}/ceph/mgr/k8sevents
+
+%post mgr-k8sevents
+if [ $1 -eq 1 ] ; then
+  /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
+fi
+
+%postun mgr-k8sevents
+if [ $1 -eq 1 ] ; then
+  /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
+fi
 
-%post mgr-diskprediction-cloud
+%files mgr-cephadm
+%{_datadir}/ceph/mgr/cephadm
+
+%post mgr-cephadm
 if [ $1 -eq 1 ] ; then
   /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
 fi
 
-%postun mgr-diskprediction-cloud
+%postun mgr-cephadm
 if [ $1 -eq 1 ] ; then
   /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
 fi
@@ -1538,13 +1860,7 @@ fi
 %endif
 
 %postun mon
-%if 0%{?suse_version}
-DISABLE_RESTART_ON_UPDATE="yes"
-%service_del_postun ceph-mon@\*.service ceph-mon.target
-%endif
-%if 0%{?fedora} || 0%{?rhel} || 0%{?openEuler}
 %systemd_postun ceph-mon@\*.service ceph-mon.target
-%endif
 if [ $1 -ge 1 ] ; then
   # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
   # "yes". In any case: if units are not running, do not touch them.
@@ -1561,9 +1877,51 @@ fi
 %{_bindir}/ceph-fuse
 %{_mandir}/man8/ceph-fuse.8*
 %{_sbindir}/mount.fuse.ceph
+%{_mandir}/man8/mount.fuse.ceph.8*
 %{_unitdir}/ceph-fuse@.service
 %{_unitdir}/ceph-fuse.target
 
+%files -n cephfs-mirror
+%{_bindir}/cephfs-mirror
+%{_mandir}/man8/cephfs-mirror.8*
+%{_unitdir}/cephfs-mirror@.service
+%{_unitdir}/cephfs-mirror.target
+
+%post -n cephfs-mirror
+%if 0%{?suse_version}
+if [ $1 -eq 1 ] ; then
+  /usr/bin/systemctl preset cephfs-mirror@\*.service cephfs-mirror.target >/dev/null 2>&1 || :
+fi
+%endif
+%if 0%{?fedora} || 0%{?rhel} || 0%{?openEuler}
+%systemd_post cephfs-mirror@\*.service cephfs-mirror.target
+%endif
+if [ $1 -eq 1 ] ; then
+/usr/bin/systemctl start cephfs-mirror.target >/dev/null 2>&1 || :
+fi
+
+%preun -n cephfs-mirror
+%if 0%{?suse_version}
+%service_del_preun cephfs-mirror@\*.service cephfs-mirror.target
+%endif
+%if 0%{?fedora} || 0%{?rhel} || 0%{?openEuler}
+%systemd_preun cephfs-mirror@\*.service cephfs-mirror.target
+%endif
+
+%postun -n cephfs-mirror
+%systemd_postun cephfs-mirror@\*.service cephfs-mirror.target
+if [ $1 -ge 1 ] ; then
+  # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
+  # "yes". In any case: if units are not running, do not touch them.
+  SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
+  if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
+    source $SYSCONF_CEPH
+  fi
+  if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
+    /usr/bin/systemctl try-restart cephfs-mirror@\*.service > /dev/null 2>&1 || :
+  fi
+fi
+
 %files -n rbd-fuse
 %{_bindir}/rbd-fuse
 %{_mandir}/man8/rbd-fuse.8*
@@ -1596,13 +1954,48 @@ fi
 %endif
 
 %postun -n rbd-mirror
+%systemd_postun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target
+if [ $1 -ge 1 ] ; then
+  # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
+  # "yes". In any case: if units are not running, do not touch them.
+  SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
+  if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
+    source $SYSCONF_CEPH
+  fi
+  if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
+    /usr/bin/systemctl try-restart ceph-rbd-mirror@\*.service > /dev/null 2>&1 || :
+  fi
+fi
+
+%files immutable-object-cache
+%{_bindir}/ceph-immutable-object-cache
+%{_mandir}/man8/ceph-immutable-object-cache.8*
+%{_unitdir}/ceph-immutable-object-cache@.service
+%{_unitdir}/ceph-immutable-object-cache.target
+
+%post immutable-object-cache
 %if 0%{?suse_version}
-DISABLE_RESTART_ON_UPDATE="yes"
-%service_del_postun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target
+if [ $1 -eq 1 ] ; then
+  /usr/bin/systemctl preset ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target >/dev/null 2>&1 || :
+fi
 %endif
 %if 0%{?fedora} || 0%{?rhel} || 0%{?openEuler}
-%systemd_postun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target
+%systemd_post ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target
+%endif
+if [ $1 -eq 1 ] ; then
+/usr/bin/systemctl start ceph-immutable-object-cache.target >/dev/null 2>&1 || :
+fi
+
+%preun immutable-object-cache
+%if 0%{?suse_version}
+%service_del_preun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target
+%endif
+%if 0%{?fedora} || 0%{?rhel} || 0%{?openEuler}
+%systemd_preun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target
 %endif
+
+%postun immutable-object-cache
+%systemd_postun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target
 if [ $1 -ge 1 ] ; then
   # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
   # "yes". In any case: if units are not running, do not touch them.
@@ -1611,13 +2004,15 @@ if [ $1 -ge 1 ] ; then
     source $SYSCONF_CEPH
   fi
   if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
-    /usr/bin/systemctl try-restart ceph-rbd-mirror@\*.service > /dev/null 2>&1 || :
+    /usr/bin/systemctl try-restart ceph-immutable-object-cache@\*.service > /dev/null 2>&1 || :
   fi
 fi
 
 %files -n rbd-nbd
 %{_bindir}/rbd-nbd
 %{_mandir}/man8/rbd-nbd.8*
+%dir %{_libexecdir}/rbd-nbd
+%{_libexecdir}/rbd-nbd/rbd-nbd_quiesce
 
 %files radosgw
 %{_bindir}/ceph-diff-sorted
@@ -1625,13 +2020,17 @@
 %{_bindir}/radosgw-token
 %{_bindir}/radosgw-es
 %{_bindir}/radosgw-object-expirer
+%{_bindir}/rgw-gap-list
+%{_bindir}/rgw-gap-list-comparator
 %{_bindir}/rgw-orphan-list
+%{_libdir}/libradosgw.so*
 %{_mandir}/man8/radosgw.8*
 %dir %{_localstatedir}/lib/ceph/radosgw
 %{_unitdir}/ceph-radosgw@.service
 %{_unitdir}/ceph-radosgw.target
 
 %post radosgw
+/sbin/ldconfig
 %if 0%{?suse_version}
 if [ $1 -eq 1 ] ; then
   /usr/bin/systemctl preset ceph-radosgw@\*.service ceph-radosgw.target >/dev/null 2>&1 || :
@@ -1653,13 +2052,8 @@ fi
 %endif
 
 %postun radosgw
-%if 0%{?suse_version}
-DISABLE_RESTART_ON_UPDATE="yes"
-%service_del_postun ceph-radosgw@\*.service ceph-radosgw.target
-%endif
-%if 0%{?fedora} || 0%{?rhel} || 0%{?openEuler}
+/sbin/ldconfig
 %systemd_postun ceph-radosgw@\*.service ceph-radosgw.target
-%endif
 if [ $1 -ge 1 ] ; then
   # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
   # "yes". In any case: if units are not running, do not touch them.
@@ -1675,6 +2069,7 @@ fi
 %files osd
 %{_bindir}/ceph-clsinfo
 %{_bindir}/ceph-bluestore-tool
+%{_bindir}/ceph-erasure-code-tool
 %{_bindir}/ceph-objectstore-tool
 %{_bindir}/ceph-osdomap-tool
 %{_bindir}/ceph-osd
@@ -1691,7 +2086,6 @@ fi
 %{_unitdir}/ceph-volume@.service
 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/osd
 %config(noreplace) %{_sysctldir}/90-ceph-osd.conf
-%{_sysconfdir}/sudoers.d/ceph-osd-smartctl
 
 %post osd
 %if 0%{?suse_version}
@@ -1720,13 +2114,7 @@ fi
 %endif
 
 %postun osd
-%if 0%{?suse_version}
-DISABLE_RESTART_ON_UPDATE="yes"
-%service_del_postun ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target
-%endif
-%if 0%{?fedora} || 0%{?rhel} || 0%{?openEuler}
 %systemd_postun ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target
-%endif
 if [ $1 -ge 1 ] ; then
   # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
   # "yes". In any case: if units are not running, do not touch them.
@@ -1739,6 +2127,11 @@ if [ $1 -ge 1 ] ; then
   fi
 fi
 
+%if 0%{with seastar}
+%files crimson-osd
+%{_bindir}/crimson-osd
+%endif
+
 %if %{with ocf}
 
 %files resource-agents
@@ -1788,6 +2181,16 @@ fi
 %{python3_sitearch}/rados.cpython*.so
 %{python3_sitearch}/rados-*.egg-info
 
+%files -n libcephsqlite
+%{_libdir}/libcephsqlite.so
+
+%post -n libcephsqlite -p /sbin/ldconfig
+
+%postun -n libcephsqlite -p /sbin/ldconfig
+
+%files -n libcephsqlite-devel
+%{_includedir}/libcephsqlite.h
+
 %if 0%{with libradosstriper}
 %files -n libradosstriper1
 %{_libdir}/libradosstriper.so.*
@@ -1808,6 +2211,8 @@ fi
 %if %{with lttng}
 %{_libdir}/librbd_tp.so.*
 %endif
+%dir %{_libdir}/ceph/librbd
+%{_libdir}/ceph/librbd/libceph_*.so*
 
 %post -n librbd1 -p /sbin/ldconfig
 
@@ -1825,7 +2230,6 @@ fi
 %files -n librgw2
 %{_libdir}/librgw.so.*
-%{_libdir}/librgw_admin_user.so.*
 %if %{with lttng}
 %{_libdir}/librgw_op_tp.so.*
 %{_libdir}/librgw_rados_tp.so.*
@@ -1838,10 +2242,8 @@ fi
 %files -n librgw-devel
 %dir %{_includedir}/rados
 %{_includedir}/rados/librgw.h
-%{_includedir}/rados/librgw_admin_user.h
 %{_includedir}/rados/rgw_file.h
 %{_libdir}/librgw.so
-%{_libdir}/librgw_admin_user.so
 %if %{with lttng}
 %{_libdir}/librgw_op_tp.so
 %{_libdir}/librgw_rados_tp.so
@@ -1867,8 +2269,19 @@ fi
 %dir %{_includedir}/cephfs
 %{_includedir}/cephfs/libcephfs.h
 %{_includedir}/cephfs/ceph_ll_client.h
+%dir %{_includedir}/cephfs/metrics
+%{_includedir}/cephfs/metrics/Types.h
 %{_libdir}/libcephfs.so
 
+%if %{with jaeger}
+%files -n libjaeger
+%{_libdir}/libopentracing.so.*
+%{_libdir}/libthrift.so.*
+%{_libdir}/libjaegertracing.so.*
+%post -n libjaeger -p /sbin/ldconfig
+%postun -n libjaeger -p /sbin/ldconfig
+%endif
+
 %files -n python%{python3_pkgversion}-cephfs
 %{python3_sitearch}/cephfs.cpython*.so
 %{python3_sitearch}/cephfs-*.egg-info
@@ -1881,19 +2294,27 @@ fi
 %{python3_sitelib}/ceph_daemon.py
 %{python3_sitelib}/__pycache__/ceph_daemon.cpython*.py*
 
+%files -n python%{python3_pkgversion}-ceph-common
+%{python3_sitelib}/ceph
+%{python3_sitelib}/ceph-*.egg-info
+
 %if 0%{with cephfs_shell}
 %files -n cephfs-shell
 %{python3_sitelib}/cephfs_shell-*.egg-info
 %{_bindir}/cephfs-shell
 %endif
 
+%files -n cephfs-top
+%{python3_sitelib}/cephfs_top-*.egg-info
+%{_bindir}/cephfs-top
+%{_mandir}/man8/cephfs-top.8*
+
 %if 0%{with ceph_test_package}
 %files -n ceph-test
 %{_bindir}/ceph-client-debug
 %{_bindir}/ceph_bench_log
 %{_bindir}/ceph_kvstorebench
 %{_bindir}/ceph_multi_stress_watch
-%{_bindir}/ceph_erasure_code
 %{_bindir}/ceph_erasure_code_benchmark
 %{_bindir}/ceph_omapbench
 %{_bindir}/ceph_objectstore_bench
@@ -1910,7 +2331,10 @@ fi
 %{_bindir}/ceph_test_*
 %{_bindir}/ceph-coverage
 %{_bindir}/ceph-debugpack
-%{_bindir}/cephdeduptool
+%{_bindir}/ceph-dedup-tool
+%if 0%{with seastar}
+%{_bindir}/crimson-store-nbd
+%endif
 %{_mandir}/man8/ceph-debugpack.8*
 %dir %{_libdir}/ceph
 %{_libdir}/ceph/ceph-monstore-update-crush.sh
@@ -1962,13 +2386,21 @@ if diff ${FILE_CONTEXT} ${FILE_CONTEXT}.pre > /dev/null 2>&1; then
   exit 0
 fi
 
+# Stop ceph.target while relabeling if CEPH_AUTO_RESTART_ON_UPGRADE=yes
+SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
+if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
+  source $SYSCONF_CEPH
+fi
+
 # Check whether the daemons are running
 /usr/bin/systemctl status ceph.target > /dev/null 2>&1
 STATUS=$?
 # Stop the daemons if they were running
 if test $STATUS -eq 0; then
-  /usr/bin/systemctl stop ceph.target > /dev/null 2>&1
+  if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
+    /usr/bin/systemctl stop ceph.target > /dev/null 2>&1
+  fi
 fi
 
 # Relabel the files
 # fix for first package install
@@ -1980,7 +2412,9 @@ rm -f ${FILE_CONTEXT}.pre
 
 # Start the daemons iff they were running before
 if test $STATUS -eq 0; then
-  /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || :
+  if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
+    /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || :
+  fi
 fi
 exit 0
 
@@ -2000,13 +2434,21 @@ if [ $1 -eq 0 ]; then
     exit 0
   fi
 
+  # Stop ceph.target while relabeling if CEPH_AUTO_RESTART_ON_UPGRADE=yes
+  SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
+  if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
+    source $SYSCONF_CEPH
+  fi
+
   # Check whether the daemons are running
   /usr/bin/systemctl status ceph.target > /dev/null 2>&1
   STATUS=$?
 
   # Stop the daemons if they were running
   if test $STATUS -eq 0; then
-    /usr/bin/systemctl stop ceph.target > /dev/null 2>&1
+    if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
+      /usr/bin/systemctl stop ceph.target > /dev/null 2>&1
+    fi
   fi
 
   /usr/sbin/fixfiles -C ${FILE_CONTEXT}.pre restore 2> /dev/null
@@ -2016,7 +2458,9 @@ if [ $1 -eq 0 ]; then
 
   # Start the daemons if they were running before
   if test $STATUS -eq 0; then
-    /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || :
+    if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
+      /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || :
+    fi
   fi
 fi
 exit 0
@@ -2026,23 +2470,29 @@ exit 0
 %if 0%{?suse_version}
 %attr(0755,root,root) %dir %{_sysconfdir}/grafana
 %attr(0755,root,root) %dir %{_sysconfdir}/grafana/dashboards
-%attr(0755,root,root) %dir %{_sysconfdir}/grafana/dashboards/ceph-dashboard
-%else
-%attr(0755,root,root) %dir %{_sysconfdir}/grafana/dashboards/ceph-dashboard
 %endif
+%attr(0755,root,root) %dir %{_sysconfdir}/grafana/dashboards/ceph-dashboard
 %config %{_sysconfdir}/grafana/dashboards/ceph-dashboard/*
 %doc monitoring/grafana/dashboards/README
 %doc monitoring/grafana/README.md
 
-%if 0%{?suse_version}
 %files prometheus-alerts
-%dir /etc/prometheus/SUSE/
-%dir /etc/prometheus/SUSE/default_rules/
-%config /etc/prometheus/SUSE/default_rules/ceph_default_alerts.yml
+%if 0%{?suse_version}
+%attr(0755,root,root) %dir %{_sysconfdir}/prometheus
 %endif
-
+%attr(0755,root,root) %dir %{_sysconfdir}/prometheus/ceph
+%config %{_sysconfdir}/prometheus/ceph/ceph_default_alerts.yml
 
 %changelog
+* Thu Dec 30 2021 liuqinfei <18138800392@163.com> - 1:16.2.7-0
+- update to 16.2.7
+
+* Fri Nov 5 2021 Dai Zhiwei - 1:14.2.15-7
+- fix aarch64 crc32c unittest error
+
+* Fri Aug 13 2021 yanglongkang - 1:14.2.15-6
+- fix build error PTHREAD_STACK_MIN
+
 * Mon Jul 26 2021 chixinze - 1:14.2.15-5
 - fix CVE-2021-3524
 
@@ -2064,7 +2514,7 @@ exit 0
 * Thu Nov 03 2020 yanglongkang - 1:12.2.8-8
 - remove python2 dependency
 
-* Fri Ooc 28 2020 chixinze - 1:12.2.8-7
+* Fri Oct 28 2020 chixinze - 1:12.2.8-7
 - ceph.spec: fix source code download URL
 
 * Fri Mar 20 2020 hy-euler - 1:12.2.8-6