/*
* Copyright (C) 2014-2021 Canonical, Ltd.
* Copyright (C) 2022 Colin Ian King.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*/
#include "stress-ng.h"
#include "git-commit-id.h"
#include "core-capabilities.h"
#include "core-hash.h"
#define VERSION "0.14.00"
#if defined(HAVE_LINUX_FIEMAP_H)
#include <linux/fiemap.h>
#endif
#if defined(HAVE_SYS_AUXV_H)
#include <sys/auxv.h>
#endif
#if defined(HAVE_SYS_CAPABILITY_H)
#include <sys/capability.h>
#endif
#if defined(HAVE_SYS_LOADAVG_H)
#include <sys/loadavg.h>
#endif
#if defined(HAVE_SYS_MOUNT_H)
#include <sys/mount.h>
#endif
#if defined(HAVE_SYS_PRCTL_H)
#include <sys/prctl.h>
#endif
#if defined(HAVE_SYS_STATVFS_H)
#include <sys/statvfs.h>
#endif
#if defined(HAVE_SYS_UTSNAME_H)
#include <sys/utsname.h>
#endif
#if defined(HAVE_LINUX_FS_H)
#include <linux/fs.h>
#endif
/* prctl(2) timer slack support */
#if defined(HAVE_SYS_PRCTL_H) && \
defined(HAVE_PRCTL) && \
defined(PR_SET_TIMERSLACK) && \
defined(PR_GET_TIMERSLACK)
#define HAVE_PRCTL_TIMER_SLACK
#endif
#if !defined(SUID_DUMP_DISABLE)
#define SUID_DUMP_DISABLE (0) /* No setuid dumping */
#endif
#if !defined(SUID_DUMP_USER)
#define SUID_DUMP_USER (1) /* Dump as user of process */
#endif
#if defined(NSIG)
#define STRESS_NSIG NSIG
#elif defined(_NSIG)
#define STRESS_NSIG _NSIG
#endif
#if defined(__TINYC__) || defined(__PCC__)
int __dso_handle;
#endif
#define STRESS_ABS_MIN_STACK_SIZE (64 * 1024)
static bool stress_stack_check_flag;
typedef struct {
const int signum;
const char *name;
} stress_sig_name_t;
#define SIG_NAME(x) { x, #x }
static const stress_sig_name_t sig_names[] = {
#if defined(SIGABRT)
SIG_NAME(SIGABRT),
#endif
#if defined(SIGALRM)
SIG_NAME(SIGALRM),
#endif
#if defined(SIGBUS)
SIG_NAME(SIGBUS),
#endif
#if defined(SIGCHLD)
SIG_NAME(SIGCHLD),
#endif
#if defined(SIGCLD)
SIG_NAME(SIGCLD),
#endif
#if defined(SIGCONT)
SIG_NAME(SIGCONT),
#endif
#if defined(SIGEMT)
SIG_NAME(SIGEMT),
#endif
#if defined(SIGFPE)
SIG_NAME(SIGFPE),
#endif
#if defined(SIGHUP)
SIG_NAME(SIGHUP),
#endif
#if defined(SIGILL)
SIG_NAME(SIGILL),
#endif
#if defined(SIGINFO)
SIG_NAME(SIGINFO),
#endif
#if defined(SIGINT)
SIG_NAME(SIGINT),
#endif
#if defined(SIGIO)
SIG_NAME(SIGIO),
#endif
#if defined(SIGIOT)
SIG_NAME(SIGIOT),
#endif
#if defined(SIGKILL)
SIG_NAME(SIGKILL),
#endif
#if defined(SIGLOST)
SIG_NAME(SIGLOST),
#endif
#if defined(SIGPIPE)
SIG_NAME(SIGPIPE),
#endif
#if defined(SIGPOLL)
SIG_NAME(SIGPOLL),
#endif
#if defined(SIGPROF)
SIG_NAME(SIGPROF),
#endif
#if defined(SIGPWR)
SIG_NAME(SIGPWR),
#endif
#if defined(SIGQUIT)
SIG_NAME(SIGQUIT),
#endif
#if defined(SIGSEGV)
SIG_NAME(SIGSEGV),
#endif
#if defined(SIGSTKFLT)
SIG_NAME(SIGSTKFLT),
#endif
#if defined(SIGSTOP)
SIG_NAME(SIGSTOP),
#endif
#if defined(SIGTSTP)
SIG_NAME(SIGTSTP),
#endif
#if defined(SIGSYS)
SIG_NAME(SIGSYS),
#endif
#if defined(SIGTERM)
SIG_NAME(SIGTERM),
#endif
#if defined(SIGTRAP)
SIG_NAME(SIGTRAP),
#endif
#if defined(SIGTTIN)
SIG_NAME(SIGTTIN),
#endif
#if defined(SIGTTOU)
SIG_NAME(SIGTTOU),
#endif
#if defined(SIGUNUSED)
SIG_NAME(SIGUNUSED),
#endif
#if defined(SIGURG)
SIG_NAME(SIGURG),
#endif
#if defined(SIGUSR1)
SIG_NAME(SIGUSR1),
#endif
#if defined(SIGUSR2)
SIG_NAME(SIGUSR2),
#endif
#if defined(SIGVTALRM)
SIG_NAME(SIGVTALRM),
#endif
#if defined(SIGXCPU)
SIG_NAME(SIGXCPU),
#endif
#if defined(SIGXFSZ)
SIG_NAME(SIGXFSZ),
#endif
#if defined(SIGWINCH)
SIG_NAME(SIGWINCH),
#endif
};
static char *stress_temp_path;
/*
* stress_temp_path_free()
* free and NULLify temporary file path
*/
void stress_temp_path_free(void)
{
if (stress_temp_path)
free(stress_temp_path);
stress_temp_path = NULL;
}
/*
* stress_set_temp_path()
* set temporary file path, default
* is . - current dir
*/
int stress_set_temp_path(const char *path)
{
stress_temp_path_free();
stress_temp_path = stress_const_optdup(path);
if (!stress_temp_path) {
(void)fprintf(stderr, "aborting: cannot allocate memory for '%s'\n", path);
return -1;
}
return 0;
}
/*
* stress_get_temp_path()
* get temporary file path, return "." if null
*/
const char *stress_get_temp_path(void)
{
if (!stress_temp_path)
return ".";
return stress_temp_path;
}
/*
* stress_check_temp_path()
* check if temp path is accessible
*/
int stress_check_temp_path(void)
{
const char *path = stress_get_temp_path();
if (access(path, R_OK | W_OK) < 0) {
(void)fprintf(stderr, "aborting: temp-path '%s' must be readable "
"and writeable\n", path);
return -1;
}
return 0;
}
/*
* stress_mk_filename()
* generate a full file name from a path and filename
*/
size_t stress_mk_filename(
char *fullname,
const size_t fullname_len,
const char *pathname,
const char *filename)
{
/*
* This may not be efficient, but it works. Do not
* be tempted to optimize this, it is not used frequently
* and is not a CPU bottleneck.
*/
(void)shim_strlcpy(fullname, pathname, fullname_len);
(void)shim_strlcat(fullname, "/", fullname_len);
return shim_strlcat(fullname, filename, fullname_len);
}
/*
* stress_get_page_size()
* get page_size
*/
size_t stress_get_page_size(void)
{
static size_t page_size = 0;
/* Use cached size */
if (page_size > 0)
return page_size;
#if defined(_SC_PAGESIZE)
{
/* Use modern sysconf */
long sz = sysconf(_SC_PAGESIZE);
if (sz > 0) {
page_size = (size_t)sz;
return page_size;
}
}
#else
UNEXPECTED
#endif
#if defined(HAVE_GETPAGESIZE)
{
/* Use deprecated getpagesize */
long sz = getpagesize();
if (sz > 0) {
page_size = (size_t)sz;
return page_size;
}
}
#endif
/* Guess */
page_size = PAGE_4K;
return page_size;
}
/*
* stress_get_processors_online()
* get number of processors that are online
*/
int32_t stress_get_processors_online(void)
{
static int32_t processors_online = 0;
if (processors_online > 0)
return processors_online;
#if defined(_SC_NPROCESSORS_ONLN)
processors_online = (int32_t)sysconf(_SC_NPROCESSORS_ONLN);
if (processors_online < 0)
processors_online = 1;
#else
processors_online = 1;
UNEXPECTED
#endif
return processors_online;
}
/*
* stress_get_processors_configured()
* get number of processors that are configured
*/
int32_t stress_get_processors_configured(void)
{
static int32_t processors_configured = 0;
if (processors_configured > 0)
return processors_configured;
#if defined(_SC_NPROCESSORS_CONF)
processors_configured = (int32_t)sysconf(_SC_NPROCESSORS_CONF);
if (processors_configured < 0)
processors_configured = stress_get_processors_online();
#else
processors_configured = 1;
UNEXPECTED
#endif
return processors_configured;
}
/*
* stress_get_ticks_per_second()
* get number of ticks per second
*/
int32_t stress_get_ticks_per_second(void)
{
#if defined(_SC_CLK_TCK)
static int32_t ticks_per_second = 0;
if (ticks_per_second > 0)
return ticks_per_second;
ticks_per_second = (int32_t)sysconf(_SC_CLK_TCK);
return ticks_per_second;
#else
UNEXPECTED
return -1;
#endif
}
/*
* stress_get_memlimits()
* get SHMALL and memory in system
* these are set to zero on failure
*/
void stress_get_memlimits(
size_t *shmall,
size_t *freemem,
size_t *totalmem,
size_t *freeswap)
{
#if defined(HAVE_SYS_SYSINFO_H) && \
defined(HAVE_SYSINFO)
struct sysinfo info;
FILE *fp;
#endif
*shmall = 0;
*freemem = 0;
*totalmem = 0;
*freeswap = 0;
#if defined(HAVE_SYS_SYSINFO_H) && \
defined(HAVE_SYSINFO)
(void)memset(&info, 0, sizeof(info));
if (sysinfo(&info) == 0) {
*freemem = info.freeram * info.mem_unit;
*totalmem = info.totalram * info.mem_unit;
*freeswap = info.freeswap * info.mem_unit;
}
fp = fopen("/proc/sys/kernel/shmall", "r");
if (!fp)
return;
if (fscanf(fp, "%zu", shmall) != 1) {
(void)fclose(fp);
return;
}
(void)fclose(fp);
#else
UNEXPECTED
#endif
}
#if defined(_SC_AVPHYS_PAGES)
#define STRESS_SC_PAGES _SC_AVPHYS_PAGES
#elif defined(_SC_PHYS_PAGES)
#define STRESS_SC_PAGES _SC_PHYS_PAGES
#endif
/*
* stress_get_phys_mem_size()
* get size of physical memory still available, 0 if failed
*/
uint64_t stress_get_phys_mem_size(void)
{
#if defined(STRESS_SC_PAGES)
uint64_t phys_pages = 0;
const size_t page_size = stress_get_page_size();
const uint64_t max_pages = ~0ULL / page_size;
phys_pages = (uint64_t)sysconf(STRESS_SC_PAGES);
/* Avoid overflow */
if (phys_pages > max_pages)
phys_pages = max_pages;
return phys_pages * page_size;
#else
UNEXPECTED
return 0ULL;
#endif
}
/*
* stress_get_filesystem_size()
* get size of free space still available on the
* file system where stress temporary path is located,
* return 0 if failed
*/
uint64_t stress_get_filesystem_size(void)
{
#if defined(HAVE_SYS_STATVFS_H)
int rc;
struct statvfs buf;
fsblkcnt_t blocks, max_blocks;
const char *path = stress_get_temp_path();
if (!path)
return 0;
(void)memset(&buf, 0, sizeof(buf));
rc = statvfs(path, &buf);
if (rc < 0)
return 0;
max_blocks = (~(fsblkcnt_t)0) / buf.f_bsize;
blocks = buf.f_bavail;
if (blocks > max_blocks)
blocks = max_blocks;
return (uint64_t)buf.f_bsize * blocks;
#else
UNEXPECTED
return 0ULL;
#endif
}
/*
* stress_get_filesystem_available_inodes()
* get number of free available inodes on the current stress
* temporary path, return 0 if failed
*/
uint64_t stress_get_filesystem_available_inodes(void)
{
#if defined(HAVE_SYS_STATVFS_H)
int rc;
struct statvfs buf;
const char *path = stress_get_temp_path();
if (!path)
return 0;
(void)memset(&buf, 0, sizeof(buf));
rc = statvfs(path, &buf);
if (rc < 0)
return 0;
return (uint64_t)buf.f_favail;
#else
UNEXPECTED
return 0ULL;
#endif
}
/*
* stress_set_nonblock()
* try to make fd non-blocking
*/
int stress_set_nonblock(const int fd)
{
int flags;
#if defined(O_NONBLOCK)
if ((flags = fcntl(fd, F_GETFL, 0)) < 0)
flags = 0;
return fcntl(fd, F_SETFL, O_NONBLOCK | flags);
#else
UNEXPECTED
flags = 1;
return ioctl(fd, FIONBIO, &flags);
#endif
}
/*
* stress_get_load_avg()
* get load average
*/
int stress_get_load_avg(
double *min1,
double *min5,
double *min15)
{
#if defined(HAVE_GETLOADAVG) && \
!defined(__UCLIBC__)
int rc;
double loadavg[3];
loadavg[0] = 0.0;
loadavg[1] = 0.0;
loadavg[2] = 0.0;
rc = getloadavg(loadavg, 3);
if (rc < 0)
goto fail;
*min1 = loadavg[0];
*min5 = loadavg[1];
*min15 = loadavg[2];
return 0;
fail:
#elif defined(HAVE_SYS_SYSINFO_H) && \
defined(HAVE_SYSINFO)
struct sysinfo info;
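/* sysinfo() reports load averages as fixed point values scaled by 2^SI_LOAD_SHIFT */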
const double scale = 1.0 / (double)(1 << SI_LOAD_SHIFT);
if (sysinfo(&info) < 0)
goto fail;
*min1 = info.loads[0] * scale;
*min5 = info.loads[1] * scale;
*min15 = info.loads[2] * scale;
return 0;
fail:
#endif
*min1 = *min5 = *min15 = 0.0;
return -1;
}
/*
* stress_parent_died_alarm()
* send child SIGALRM if the parent died
*/
void stress_parent_died_alarm(void)
{
#if defined(HAVE_PRCTL) && \
defined(HAVE_SYS_PRCTL_H) && \
defined(PR_SET_PDEATHSIG)
(void)prctl(PR_SET_PDEATHSIG, SIGALRM);
#else
UNEXPECTED
#endif
}
/*
* stress_process_dumpable()
* set dumpable flag, e.g. produce a core dump or not,
* don't print an error if these fail, it's not that
* critical
*/
int stress_process_dumpable(const bool dumpable)
{
int fd, rc = 0;
#if defined(RLIMIT_CORE)
{
struct rlimit lim;
int ret;
ret = getrlimit(RLIMIT_CORE, &lim);
if (ret == 0) {
lim.rlim_cur = 0;
(void)setrlimit(RLIMIT_CORE, &lim);
}
lim.rlim_cur = 0;
lim.rlim_max = 0;
(void)setrlimit(RLIMIT_CORE, &lim);
}
#else
UNEXPECTED
#endif
/*
* changing PR_SET_DUMPABLE also affects the
* oom adjust capability, so for now, we disable
* this as I'd rather have an oom-able process when
* memory gets constrained. Don't enable this
* unless one has checked that processes remain oom-able!
*/
#if 0 && defined(HAVE_PRCTL) && \
defined(HAVE_SYS_PRCTL_H) && \
defined(PR_SET_DUMPABLE)
(void)prctl(PR_SET_DUMPABLE,
dumpable ? SUID_DUMP_USER : SUID_DUMP_DISABLE);
#endif
if ((fd = open("/proc/self/coredump_filter", O_WRONLY)) >= 0) {
char const *str =
dumpable ? "0x33" : "0x00";
if (write(fd, str, strlen(str)) < 0)
rc = -1;
(void)close(fd);
}
return rc;
}
/*
* stress_set_timer_slack_ns()
* set timer slack in nanoseconds
*/
int stress_set_timer_slack_ns(const char *opt)
{
#if defined(HAVE_PRCTL_TIMER_SLACK)
uint32_t timer_slack;
timer_slack = stress_get_uint32(opt);
(void)stress_set_setting("timer-slack", TYPE_ID_UINT32, &timer_slack);
#else
UNEXPECTED
(void)opt;
#endif
return 0;
}
/*
* stress_set_timer_slack()
* set timer slack
*/
void stress_set_timer_slack(void)
{
#if defined(HAVE_PRCTL) && \
defined(HAVE_SYS_PRCTL_H) && \
defined(HAVE_PRCTL_TIMER_SLACK)
uint32_t timer_slack;
if (stress_get_setting("timer-slack", &timer_slack))
(void)prctl(PR_SET_TIMERSLACK, timer_slack);
#else
UNEXPECTED
#endif
}
/*
* stress_set_proc_name_init()
* init setproctitle if supported
*/
void stress_set_proc_name_init(int argc, char *argv[], char *envp[])
{
#if defined(HAVE_BSD_UNISTD_H) && \
defined(HAVE_SETPROCTITLE)
(void)setproctitle_init(argc, argv, envp);
#else
(void)argc;
(void)argv;
(void)envp;
UNEXPECTED
#endif
}
/*
* stress_set_proc_name()
* Set process name, we don't care if it fails
*/
void stress_set_proc_name(const char *name)
{
(void)name;
if (g_opt_flags & OPT_FLAGS_KEEP_NAME)
return;
#if defined(HAVE_BSD_UNISTD_H) && \
defined(HAVE_SETPROCTITLE)
/* Sets argv[0] */
setproctitle("-%s", name);
#endif
#if defined(HAVE_PRCTL) && \
defined(HAVE_SYS_PRCTL_H) && \
defined(PR_SET_NAME)
/* Sets the comm field */
(void)prctl(PR_SET_NAME, name);
#endif
}
/*
* stress_set_proc_state_str
* set process name based on run state string, see
* macros STRESS_STATE_*
*/
void stress_set_proc_state_str(const char *name, const char *str)
{
if (g_opt_flags & OPT_FLAGS_KEEP_NAME)
return;
#if defined(HAVE_BSD_UNISTD_H) && \
defined(HAVE_SETPROCTITLE)
setproctitle("-%s [%s]", name, str);
#else
(void)name;
(void)str;
#endif
}
/*
* stress_set_proc_state
* set process name based on run state, see
* macros STRESS_STATE_*
*/
void stress_set_proc_state(const char *name, const int state)
{
static const char *stress_states[] = {
"start",
"init",
"run",
"deinit",
"stop",
"exit",
"wait"
};
if ((state < 0) || (state >= (int)SIZEOF_ARRAY(stress_states)))
return;
stress_set_proc_state_str(name, stress_states[state]);
}
/*
* stress_munge_underscore()
* turn '_' to '-' in strings
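* (returns a pointer to a static buffer, so each call overwrites the
* previous result and the function is not reentrant)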
*/
char *stress_munge_underscore(const char *str)
{
static char munged[128];
char *dst;
const char *src;
const size_t str_len = strlen(str);
const ssize_t len = (ssize_t)STRESS_MINIMUM(str_len, sizeof(munged) - 1);
for (src = str, dst = munged; *src && (dst - munged) < len; src++)
*dst++ = (*src == '_' ? '-' : *src);
*dst = '\0';
return munged;
}
/*
* stress_get_stack_direction_helper()
* helper to determine direction of stack
*/
static ssize_t NOINLINE OPTIMIZE0 stress_get_stack_direction_helper(const uint8_t *val1)
{
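/*
* val2 lives in this (deeper, non-inlined) stack frame while val1
* points at a local in the caller's frame, so the sign of the
* address difference reveals which way the stack grows
*/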
const uint8_t val2 = 0;
const ssize_t diff = &val2 - (const uint8_t *)val1;
return (diff > 0) - (diff < 0);
}
/*
* stress_get_stack_direction()
* determine which way the stack goes, up / down
* just pass in any var on the stack before calling
* return:
* 1 - stack goes down (conventional)
* 0 - error
* -1 - stack goes up (unconventional)
*/
ssize_t stress_get_stack_direction(void)
{
uint8_t val1 = 0;
uint8_t waste[64];
waste[(sizeof waste) - 1] = 0;
return stress_get_stack_direction_helper(&val1);
}
/*
* stress_get_stack_top()
* Get the stack top given the start and size of the stack,
* offset by a bit of slop. Assumes stack is > 64 bytes
*/
void *stress_get_stack_top(void *start, size_t size)
{
const size_t offset = stress_get_stack_direction() < 0 ? (size - 64) : 64;
return (void *)((char *)start + offset);
}
/*
* stress_uint64_zero()
* return uint64 zero in a way that forces less smart
* static analysers to realise we are doing this
* to force a division by zero. I'd like to have
* a better solution than this ghastly way.
*/
uint64_t stress_uint64_zero(void)
{
return g_shared->zero;
}
/*
* stress_base36_encode_uint64()
* encode 64 bit hash of filename into a unique base 36
* filename of up to 13 chars long + 1 char eos
*/
static void stress_base36_encode_uint64(char dst[14], uint64_t val)
{
static const char b36[] = "abcdefghijklmnopqrstuvwxyz0123456789";
const int b = 36;
char *ptr = dst;
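/*
* emit base 36 digits least significant first; ordering does not
* matter here, only that equal hash values map to equal names
*/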
while (val) {
*ptr++ = b36[val % b];
val /= b;
}
*ptr = '\0';
}
/*
* stress_temp_hash_truncate()
* filenames may be too long for the underlying filesystem
* so work around this by hashing them into a 64 bit value
* encoded as a short base-36 filename.
*/
static void stress_temp_hash_truncate(char *filename)
{
size_t f_namemax = 16;
size_t len = strlen(filename);
#if defined(HAVE_SYS_STATVFS_H)
struct statvfs buf;
(void)memset(&buf, 0, sizeof(buf));
if (statvfs(stress_get_temp_path(), &buf) == 0)
f_namemax = buf.f_namemax;
#endif
if (strlen(filename) > f_namemax) {
uint32_t upper, lower;
uint64_t val;
upper = stress_hash_jenkin((uint8_t *)filename, len);
lower = stress_hash_pjw(filename);
val = ((uint64_t)upper << 32) | lower;
stress_base36_encode_uint64(filename, val);
}
}
/*
* stress_temp_filename()
* construct a temp filename
*/
int stress_temp_filename(
char *path,
const size_t len,
const char *name,
const pid_t pid,
const uint32_t instance,
const uint64_t magic)
{
char directoryname[PATH_MAX];
char filename[PATH_MAX];
(void)snprintf(directoryname, sizeof(directoryname),
"tmp-%s-%d-%" PRIu32,
name, (int)pid, instance);
stress_temp_hash_truncate(directoryname);
(void)snprintf(filename, sizeof(filename),
"%s-%d-%" PRIu32 "-%" PRIu64,
name, (int)pid, instance, magic);
stress_temp_hash_truncate(filename);
return snprintf(path, len, "%s/%s/%s",
stress_get_temp_path(), directoryname, filename);
}
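/*
* Illustration (hypothetical values): a call such as
* stress_temp_filename(path, sizeof(path), "hdd", 1234, 0, 42)
* yields a path of the form
* <temp-path>/tmp-hdd-1234-0/hdd-1234-0-42
* where each component is hashed down by stress_temp_hash_truncate()
* only if it exceeds the filesystem's maximum name length.
*/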
/*
* stress_temp_filename_args()
* construct a temp filename using info from args
*/
int stress_temp_filename_args(
const stress_args_t *args,
char *path,
const size_t len,
const uint64_t magic)
{
return stress_temp_filename(path, len, args->name,
args->pid, args->instance, magic);
}
/*
* stress_temp_dir()
* create a temporary directory name
*/
int stress_temp_dir(
char *path,
const size_t len,
const char *name,
const pid_t pid,
const uint32_t instance)
{
char directoryname[256];
(void)snprintf(directoryname, sizeof(directoryname),
"tmp-%s-%d-%" PRIu32,
name, (int)pid, instance);
stress_temp_hash_truncate(directoryname);
return snprintf(path, len, "%s/%s",
stress_get_temp_path(), directoryname);
}
/*
* stress_temp_dir_args()
* create a temporary directory name using info from args
*/
int stress_temp_dir_args(
const stress_args_t *args,
char *path,
const size_t len)
{
return stress_temp_dir(path, len,
args->name, args->pid, args->instance);
}
/*
* stress_temp_dir_mk()
* create a temporary directory
*/
int stress_temp_dir_mk(
const char *name,
const pid_t pid,
const uint32_t instance)
{
int ret;
char tmp[PATH_MAX];
stress_temp_dir(tmp, sizeof(tmp), name, pid, instance);
ret = mkdir(tmp, S_IRWXU);
if (ret < 0) {
ret = -errno;
pr_fail("%s: mkdir '%s' failed, errno=%d (%s)\n",
name, tmp, errno, strerror(errno));
(void)shim_rmdir(tmp);
}
return ret;
}
/*
* stress_temp_dir_mk_args()
* create a temporary directory using info from args
*/
int stress_temp_dir_mk_args(const stress_args_t *args)
{
return stress_temp_dir_mk(args->name, args->pid, args->instance);
}
/*
* stress_temp_dir_rm()
* remove a temporary directory
*/
int stress_temp_dir_rm(
const char *name,
const pid_t pid,
const uint32_t instance)
{
int ret;
char tmp[PATH_MAX + 1];
stress_temp_dir(tmp, sizeof(tmp), name, pid, instance);
ret = shim_rmdir(tmp);
if (ret < 0) {
ret = -errno;
pr_fail("%s: rmdir '%s' failed, errno=%d (%s)\n",
name, tmp, errno, strerror(errno));
}
return ret;
}
/*
* stress_temp_dir_rm_args()
* remove a temporary directory using info from args
*/
int stress_temp_dir_rm_args(const stress_args_t *args)
{
return stress_temp_dir_rm(args->name, args->pid, args->instance);
}
/*
* stress_cwd_readwriteable()
* check if cwd is read/writeable
*/
void stress_cwd_readwriteable(void)
{
char path[PATH_MAX];
if (getcwd(path, sizeof(path)) == NULL) {
pr_dbg("cwd: Cannot determine current working directory\n");
return;
}
if (access(path, R_OK | W_OK)) {
pr_inf("Working directory %s is not read/writeable, "
"some I/O tests may fail\n", path);
return;
}
}
/*
* stress_signal_name()
* return string version of signal number, NULL if not found
*/
const char *stress_signal_name(const int signum)
{
size_t i;
for (i = 0; i < SIZEOF_ARRAY(sig_names); i++) {
if (signum == sig_names[i].signum)
return sig_names[i].name;
}
return NULL;
}
/*
* stress_strsignal()
* signum to human readable string
*/
const char *stress_strsignal(const int signum)
{
static char buffer[40];
const char *str = stress_signal_name(signum);
if (str)
(void)snprintf(buffer, sizeof(buffer), "signal %d '%s'",
signum, str);
else
(void)snprintf(buffer, sizeof(buffer), "signal %d", signum);
return buffer;
}
/*
* stress_strnrnd()
* fill string with random chars
*/
void stress_strnrnd(char *str, const size_t len)
{
const char *end = str + len;
while (str < end - 1)
*str++ = (stress_mwc8() % 26) + 'a';
*str = '\0';
}
/*
* stress_uint8rnd4()
* fill a uint8_t buffer full of random data
* buffer *must* be multiple of 4 bytes in size
*/
void stress_uint8rnd4(uint8_t *data, const size_t len)
{
register size_t i = len >> 2;
register uint8_t *ptr = data;
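/* each 32 bit random value is unpacked into four consecutive bytes */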
while (i--) {
register uint32_t v = stress_mwc32();
*ptr++ = (uint8_t)v;
v >>= 8;
*ptr++ = (uint8_t)v;
v >>= 8;
*ptr++ = (uint8_t)v;
v >>= 8;
*ptr++ = (uint8_t)v;
}
}
/*
* pr_runinfo()
* short info about the system we are running stress-ng on
* for the -v option
*/
void pr_runinfo(void)
{
#if defined(HAVE_UNAME) && \
defined(HAVE_SYS_UTSNAME_H)
struct utsname uts;
#endif
#if defined(HAVE_SYS_SYSINFO_H) && \
defined(HAVE_SYSINFO)
struct sysinfo info;
#endif
if (!(g_opt_flags & PR_DEBUG))
return;
if (sizeof(STRESS_GIT_COMMIT_ID) > 1) {
pr_dbg("%s %s g%12.12s\n",
g_app_name, VERSION, STRESS_GIT_COMMIT_ID);
} else {
pr_dbg("%s %s\n",
g_app_name, VERSION);
}
#if defined(HAVE_UNAME) && \
defined(HAVE_SYS_UTSNAME_H)
if (uname(&uts) == 0) {
pr_dbg("system: %s %s %s %s %s\n",
uts.sysname, uts.nodename, uts.release,
uts.version, uts.machine);
}
#endif
#if defined(HAVE_SYS_SYSINFO_H) && \
defined(HAVE_SYSINFO)
/* Keep static analyzer happy */
(void)memset(&info, 0, sizeof(info));
if (sysinfo(&info) == 0) {
char ram_t[32], ram_f[32], ram_s[32];
stress_uint64_to_str(ram_t, sizeof(ram_t), (uint64_t)info.totalram);
stress_uint64_to_str(ram_f, sizeof(ram_f), (uint64_t)info.freeram);
stress_uint64_to_str(ram_s, sizeof(ram_s), (uint64_t)info.freeswap);
pr_dbg("RAM total: %s, RAM free: %s, swap free: %s\n", ram_t, ram_f, ram_s);
}
#endif
}
/*
* pr_yaml_runinfo()
* log info about the system we are running stress-ng on
*/
void pr_yaml_runinfo(FILE *yaml)
{
#if defined(HAVE_UNAME) && \
defined(HAVE_SYS_UTSNAME_H)
struct utsname uts;
#endif
#if defined(HAVE_SYS_SYSINFO_H) && \
defined(HAVE_SYSINFO)
struct sysinfo info;
#endif
time_t t;
struct tm *tm = NULL;
const size_t hostname_len = stress_hostname_length();
char hostname[hostname_len];
const char *user = shim_getlogin();
pr_yaml(yaml, "system-info:\n");
if (time(&t) != ((time_t)-1))
tm = localtime(&t);
pr_yaml(yaml, " stress-ng-version: " VERSION "\n");
pr_yaml(yaml, " run-by: %s\n", user ? user : "unknown");
if (tm) {
pr_yaml(yaml, " date-yyyy-mm-dd: %4.4d:%2.2d:%2.2d\n",
tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday);
pr_yaml(yaml, " time-hh-mm-ss: %2.2d:%2.2d:%2.2d\n",
tm->tm_hour, tm->tm_min, tm->tm_sec);
pr_yaml(yaml, " epoch-secs: %ld\n", (long)t);
}
if (!gethostname(hostname, sizeof(hostname)))
pr_yaml(yaml, " hostname: %s\n", hostname);
#if defined(HAVE_UNAME) && \
defined(HAVE_SYS_UTSNAME_H)
if (uname(&uts) == 0) {
pr_yaml(yaml, " sysname: %s\n", uts.sysname);
pr_yaml(yaml, " nodename: %s\n", uts.nodename);
pr_yaml(yaml, " release: %s\n", uts.release);
pr_yaml(yaml, " version: '%s'\n", uts.version);
pr_yaml(yaml, " machine: %s\n", uts.machine);
}
#endif
#if defined(HAVE_SYS_SYSINFO_H) && \
defined(HAVE_SYSINFO)
(void)memset(&info, 0, sizeof(info));
if (sysinfo(&info) == 0) {
pr_yaml(yaml, " uptime: %ld\n", info.uptime);
pr_yaml(yaml, " totalram: %lu\n", info.totalram);
pr_yaml(yaml, " freeram: %lu\n", info.freeram);
pr_yaml(yaml, " sharedram: %lu\n", info.sharedram);
pr_yaml(yaml, " bufferram: %lu\n", info.bufferram);
pr_yaml(yaml, " totalswap: %lu\n", info.totalswap);
pr_yaml(yaml, " freeswap: %lu\n", info.freeswap);
}
#endif
pr_yaml(yaml, " pagesize: %zd\n", stress_get_page_size());
pr_yaml(yaml, " cpus: %" PRId32 "\n", stress_get_processors_configured());
pr_yaml(yaml, " cpus-online: %" PRId32 "\n", stress_get_processors_online());
pr_yaml(yaml, " ticks-per-second: %" PRId32 "\n", stress_get_ticks_per_second());
pr_yaml(yaml, "\n");
}
/*
* stress_cache_alloc()
* allocate shared cache buffer
*/
int stress_cache_alloc(const char *name)
{
#if defined(__linux__)
stress_cpus_t *cpu_caches;
stress_cpu_cache_t *cache = NULL;
uint16_t max_cache_level = 0;
#endif
#if !defined(__linux__)
g_shared->mem_cache_size = MEM_CACHE_SIZE;
#else
cpu_caches = stress_get_all_cpu_cache_details();
if (!cpu_caches) {
if (stress_warn_once())
pr_dbg("%s: using defaults, cannot determine cache details\n", name);
g_shared->mem_cache_size = MEM_CACHE_SIZE;
goto init_done;
}
max_cache_level = stress_get_max_cache_level(cpu_caches);
if (max_cache_level == 0) {
if (stress_warn_once())
pr_dbg("%s: using defaults, cannot determine cache level details\n", name);
g_shared->mem_cache_size = MEM_CACHE_SIZE;
goto init_done;
}
if (g_shared->mem_cache_level > max_cache_level) {
if (stress_warn_once())
pr_dbg("%s: using cache maximum level L%d\n", name,
max_cache_level);
g_shared->mem_cache_level = max_cache_level;
}
cache = stress_get_cpu_cache(cpu_caches, g_shared->mem_cache_level);
if (!cache) {
if (stress_warn_once())
pr_dbg("%s: using built-in defaults as no suitable "
"cache found\n", name);
g_shared->mem_cache_size = MEM_CACHE_SIZE;
goto init_done;
}
if (g_shared->mem_cache_ways > 0) {
uint64_t way_size;
if (g_shared->mem_cache_ways > cache->ways) {
if (stress_warn_once())
pr_inf("%s: cache way value too high - "
"defaulting to %d (the maximum)\n",
name, cache->ways);
g_shared->mem_cache_ways = cache->ways;
}
way_size = cache->size / cache->ways;
/* only fill the specified number of cache ways */
g_shared->mem_cache_size = way_size * g_shared->mem_cache_ways;
} else {
/* fill the entire cache */
g_shared->mem_cache_size = cache->size;
}
if (!g_shared->mem_cache_size) {
if (stress_warn_once())
pr_dbg("%s: using built-in defaults as "
"unable to determine cache size\n", name);
g_shared->mem_cache_size = MEM_CACHE_SIZE;
}
init_done:
stress_free_cpu_caches(cpu_caches);
#endif
g_shared->mem_cache =
(uint8_t *)mmap(NULL, g_shared->mem_cache_size,
PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, -1, 0);
if (g_shared->mem_cache == MAP_FAILED) {
g_shared->mem_cache = NULL;
pr_err("%s: failed to mmap shared cache buffer, errno=%d (%s)\n",
name, errno, strerror(errno));
return -1;
}
if (stress_warn_once())
pr_dbg("%s: shared cache buffer size: %" PRIu64 "K\n",
name, g_shared->mem_cache_size / 1024);
return 0;
}
/*
* stress_cache_free()
* free shared cache buffer
*/
void stress_cache_free(void)
{
if (g_shared->mem_cache)
(void)munmap((void *)g_shared->mem_cache, g_shared->mem_cache_size);
}
/*
* system_write()
* write a buffer to a /sys or /proc entry
*/
ssize_t system_write(
const char *path,
const char *buf,
const size_t buf_len)
{
int fd;
ssize_t ret;
fd = open(path, O_WRONLY);
if (fd < 0)
return -errno;
ret = write(fd, buf, buf_len);
if (ret < (ssize_t)buf_len)
ret = -errno;
(void)close(fd);
return ret;
}
/*
* system_read()
* read a buffer from a /sys or /proc entry
*/
ssize_t system_read(
const char *path,
char *buf,
const size_t buf_len)
{
int fd;
ssize_t ret;
(void)memset(buf, 0, buf_len);
fd = open(path, O_RDONLY);
if (fd < 0)
return -errno;
ret = read(fd, buf, buf_len);
if (ret < 0) {
buf[0] = '\0';
ret = -errno;
}
(void)close(fd);
if ((ssize_t)buf_len == ret)
buf[buf_len - 1] = '\0';
else
buf[ret] = '\0';
return ret;
}
/*
* stress_is_prime64()
* return true if 64 bit value n is prime
* http://en.wikipedia.org/wiki/Primality_test
*/
bool stress_is_prime64(const uint64_t n)
{
register uint64_t i, max;
double max_d;
if (n <= 3)
return n >= 2;
if ((n % 2 == 0) || (n % 3 == 0))
return false;
max_d = 1.0 + sqrt((double)n);
max = (uint64_t)max_d;
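/*
* trial division: 2 and 3 have been ruled out above, so any
* remaining factor is of the form 6k +/- 1; test 5, 7, 11, 13, ...
* up to sqrt(n)
*/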
for (i = 5; i < max; i+= 6)
if ((n % i == 0) || (n % (i + 2) == 0))
return false;
return true;
}
/*
* stress_get_prime64()
* find a prime that is not a multiple of n,
* used for file name striding. Minimum is 1009,
* max is unbounded. Return a prime > n, each
* call will return the next prime to keep the
* primes different each call.
*/
uint64_t stress_get_prime64(const uint64_t n)
{
static uint64_t p = 1009;
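/* smallest odd value >= n; p then advances in steps of 2 below so only odd candidates are tested */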
uint64_t odd_n = (n & ~1) + 1;
if (p < odd_n)
p = odd_n;
/* Search for next prime.. */
for (;;) {
p += 2;
if ((n % p) && stress_is_prime64(p))
return p;
}
}
/*
* stress_get_max_file_limit()
* get max number of files that the current
* process can open not counting the files that
* may already be open.
*/
size_t stress_get_max_file_limit(void)
{
#if defined(RLIMIT_NOFILE)
struct rlimit rlim;
#endif
size_t max_rlim = SIZE_MAX;
size_t max_sysconf;
#if defined(RLIMIT_NOFILE)
if (!getrlimit(RLIMIT_NOFILE, &rlim))
max_rlim = (size_t)rlim.rlim_cur;
#endif
#if defined(_SC_OPEN_MAX)
{
const long open_max = sysconf(_SC_OPEN_MAX);
max_sysconf = (open_max > 0) ? (size_t)open_max : SIZE_MAX;
}
#else
max_sysconf = SIZE_MAX;
UNEXPECTED
#endif
/* return the lowest of these two */
return STRESS_MINIMUM(max_rlim, max_sysconf);
}
/*
* stress_get_open_count(void)
* get number of open file descriptors
*/
static inline size_t stress_get_open_count(void)
{
#if defined(__linux__)
DIR *dir;
struct dirent *d;
size_t n = 0;
dir = opendir("/proc/self/fd");
if (!dir)
return -1;
while ((d = readdir(dir)) != NULL) {
if (isdigit((int)d->d_name[0]))
n++;
}
(void)closedir(dir);
/*
* opendir used one extra fd that is now
* closed, so take that off the total
*/
return (n > 1) ? (n - 1) : n;
#else
return 0;
#endif
}
/*
* stress_get_file_limit()
* get max number of files that the current
* process can open excluding currently opened
* files.
*/
size_t stress_get_file_limit(void)
{
struct rlimit rlim;
size_t i, last_opened, opened, max = 65536; /* initial guess */
if (!getrlimit(RLIMIT_NOFILE, &rlim))
max = (size_t)rlim.rlim_cur;
last_opened = 0;
opened = stress_get_open_count();
if (opened == 0) {
/* Determine max number of free file descriptors we have */
for (i = 0; i < max; i++) {
if (fcntl((int)i, F_GETFL) > -1) {
opened++;
last_opened = i;
} else {
/*
* Hack: Over 250 contiguously closed files
* most probably indicates we're at the point
* where no more opened file descriptors are
* going to be found, so bail out rather than
* scanning for any more opened files
*/
if (i - last_opened > 250)
break;
}
}
}
return max - opened;
}
/*
* stress_get_bad_fd()
* return a fd that will produce -EINVAL when using it
* either because it is not open or it is just out of range
*/
int stress_get_bad_fd(void)
{
#if defined(RLIMIT_NOFILE) && \
defined(F_GETFL)
struct rlimit rlim;
(void)memset(&rlim, 0, sizeof(rlim));
if (getrlimit(RLIMIT_NOFILE, &rlim) == 0) {
if (rlim.rlim_cur < INT_MAX - 1) {
if (fcntl((int)rlim.rlim_cur, F_GETFL) == -1) {
return (int)rlim.rlim_cur + 1;
}
}
}
#elif defined(F_GETFL)
int i;
for (i = 2048; i > fileno(stdout); i--) {
if (fcntl((int)i, F_GETFL) == -1)
return i;
}
#else
UNEXPECTED
#endif
return -1;
}
/*
* stress_sigaltstack_no_check()
* attempt to set up an alternative signal stack with no
* minimum size check on stack
* stack - must be at least MINSIGSTKSZ
* size - size of stack (- STACK_ALIGNMENT)
*/
int stress_sigaltstack_no_check(void *stack, const size_t size)
{
#if defined(HAVE_SIGALTSTACK)
stack_t ss;
ss.ss_sp = (void *)stack;
ss.ss_size = size;
ss.ss_flags = 0;
return sigaltstack(&ss, NULL);
#else
UNEXPECTED
(void)stack;
(void)size;
return 0;
#endif
}
/*
* stress_sigaltstack()
* attempt to set up an alternative signal stack
* stack - must be at least MINSIGSTKSZ
* size - size of stack (- STACK_ALIGNMENT)
*/
int stress_sigaltstack(void *stack, const size_t size)
{
#if defined(HAVE_SIGALTSTACK)
if (size < (size_t)STRESS_MINSIGSTKSZ) {
pr_err("sigaltstack stack size %zu must be more than %zuK\n",
size, (size_t)STRESS_MINSIGSTKSZ / 1024);
return -1;
}
if (stress_sigaltstack_no_check(stack, size) < 0) {
pr_fail("sigaltstack failed: errno=%d (%s)\n",
errno, strerror(errno));
return -1;
}
#else
UNEXPECTED
(void)stack;
(void)size;
#endif
return 0;
}
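/*
* Illustrative usage (the real pattern used in this file is the
* mmap'd stack in stress_sighandler() below):
*
*   static uint8_t altstack[STRESS_SIGSTKSZ];
*
*   if (stress_sigaltstack(altstack, STRESS_SIGSTKSZ) < 0)
*       return -1;
*
* ...before installing a handler with SA_ONSTACK set.
*/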
/*
* stress_sighandler()
* set signal handler in generic way
*/
int stress_sighandler(
const char *name,
const int signum,
void (*handler)(int),
struct sigaction *orig_action)
{
struct sigaction new_action;
#if defined(HAVE_SIGALTSTACK)
{
static uint8_t *stack = NULL;
if (stack == NULL) {
/* Allocate stack, we currently leak this */
stack = (uint8_t *)mmap(NULL, STRESS_SIGSTKSZ, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (stack == MAP_FAILED) {
pr_inf("%s: sigaction %s: cannot allocated signal stack, "
"errno = %d (%s)\n",
name, stress_strsignal(signum),
errno, strerror(errno));
return -1;
}
if (stress_sigaltstack(stack, STRESS_SIGSTKSZ) < 0)
return -1;
}
}
#endif
(void)memset(&new_action, 0, sizeof new_action);
new_action.sa_handler = handler;
(void)sigemptyset(&new_action.sa_mask);
new_action.sa_flags = SA_ONSTACK;
if (sigaction(signum, &new_action, orig_action) < 0) {
pr_fail("%s: sigaction %s: errno=%d (%s)\n",
name, stress_strsignal(signum), errno, strerror(errno));
return -1;
}
return 0;
}
/*
* stress_sighandler_default
* restore signal handler to default handler
*/
int stress_sighandler_default(const int signum)
{
struct sigaction new_action;
(void)memset(&new_action, 0, sizeof new_action);
new_action.sa_handler = SIG_DFL;
return sigaction(signum, &new_action, NULL);
}
/*
* stress_handle_stop_stressing()
* set flag to indicate to stressor to stop stressing
*/
void stress_handle_stop_stressing(int signum)
{
(void)signum;
keep_stressing_set_flag(false);
/*
* Trigger another SIGALRM until the stressor gets the message
* that it needs to terminate
*/
(void)alarm(1);
}
/*
* stress_sig_stop_stressing()
* install a handler that sets the global flag
* to indicate to a stressor to stop stressing
*/
int stress_sig_stop_stressing(const char *name, const int sig)
{
return stress_sighandler(name, sig, stress_handle_stop_stressing, NULL);
}
/*
* stress_sigrestore()
* restore a handler
*/
int stress_sigrestore(
const char *name,
const int signum,
struct sigaction *orig_action)
{
if (sigaction(signum, orig_action, NULL) < 0) {
pr_fail("%s: sigaction %s restore: errno=%d (%s)\n",
name, stress_strsignal(signum), errno, strerror(errno));
return -1;
}
return 0;
}
/*
* stress_get_cpu()
* get cpu number that process is currently on
*/
unsigned int stress_get_cpu(void)
{
#if defined(HAVE_SCHED_GETCPU) && \
!defined(__PPC64__) && \
!defined(__s390x__)
const int cpu = sched_getcpu();
return (unsigned int)((cpu < 0) ? 0 : cpu);
#else
return 0;
#endif
}
#define XSTRINGIFY(s) STRINGIFY(s)
#define STRINGIFY(s) #s
/*
* stress_get_compiler()
* return compiler info
*/
const char *stress_get_compiler(void)
{
#if defined(__ICC) && \
defined(__INTEL_COMPILER) && \
defined(__INTEL_COMPILER_UPDATE)
static const char cc[] = "icc " XSTRINGIFY(__INTEL_COMPILER) "." XSTRINGIFY(__INTEL_COMPILER_UPDATE) "";
#elif defined(__TINYC__)
static const char cc[] = "tcc " XSTRINGIFY(__TINYC__) "";
#elif defined(__PCC__) && \
defined(__PCC_MINOR__)
static const char cc[] = "pcc " XSTRINGIFY(__PCC__) "." XSTRINGIFY(__PCC_MINOR__) "." XSTRINGIFY(__PCC_MINORMINOR__) "";
#elif defined(__clang_major__) && \
defined(__clang_minor__)
static const char cc[] = "clang " XSTRINGIFY(__clang_major__) "." XSTRINGIFY(__clang_minor__) "";
#elif defined(__GNUC__) && \
defined(__GNUC_MINOR__)
static const char cc[] = "gcc " XSTRINGIFY(__GNUC__) "." XSTRINGIFY(__GNUC_MINOR__) "";
#else
static const char cc[] = "cc unknown";
#endif
return cc;
}
/*
* stress_get_uname_info()
* return uname information
*/
const char *stress_get_uname_info(void)
{
#if defined(HAVE_UNAME) && \
defined(HAVE_SYS_UTSNAME_H)
struct utsname buf;
if (!uname(&buf)) {
static char str[sizeof(buf.machine) +
sizeof(buf.sysname) +
sizeof(buf.release) + 3];
(void)snprintf(str, sizeof(str), "%s %s %s", buf.machine, buf.sysname, buf.release);
return str;
}
#else
UNEXPECTED
#endif
return "unknown";
}
/*
* stress_not_implemented()
* report that a stressor is not implemented
* on a particular arch or kernel
*/
int stress_not_implemented(const stress_args_t *args)
{
static const char msg[] = "this stressor is not implemented on "
"this system";
if (args->instance == 0) {
#if defined(HAVE_UNAME) && \
defined(HAVE_SYS_UTSNAME_H)
struct utsname buf;
if (!uname(&buf)) {
pr_inf_skip("%s: %s: %s %s\n",
args->name, msg, stress_get_uname_info(),
stress_get_compiler());
return EXIT_NOT_IMPLEMENTED;
}
#endif
pr_inf_skip("%s: %s: %s\n",
args->name, msg, stress_get_compiler());
}
return EXIT_NOT_IMPLEMENTED;
}
#if defined(F_SETPIPE_SZ)
/*
* stress_check_max_pipe_size()
* check if the given pipe size is allowed
*/
static inline int stress_check_max_pipe_size(
const size_t sz,
const size_t page_size)
{
int fds[2];
if (sz < page_size)
return -1;
if (pipe(fds) < 0)
return -1;
if (fcntl(fds[0], F_SETPIPE_SZ, sz) < 0) {
(void)close(fds[0]);
(void)close(fds[1]);
return -1;
}
(void)close(fds[0]);
(void)close(fds[1]);
return 0;
}
#endif
/*
* stress_probe_max_pipe_size()
* determine the maximum allowed pipe size
*/
size_t stress_probe_max_pipe_size(void)
{
static size_t max_pipe_size;
#if defined(F_SETPIPE_SZ)
ssize_t ret;
size_t i, prev_sz, sz, min, max;
char buf[64];
size_t page_size;
#endif
/* Already determined? return cached size */
if (max_pipe_size)
return max_pipe_size;
#if defined(F_SETPIPE_SZ)
page_size = stress_get_page_size();
/*
* Try and find maximum pipe size directly
*/
ret = system_read("/proc/sys/fs/pipe-max-size", buf, sizeof(buf));
if (ret > 0) {
if (sscanf(buf, "%zd", &sz) == 1)
if (!stress_check_max_pipe_size(sz, page_size))
goto ret;
}
/*
* Need to find size by binary chop probing
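* (bisection: a midpoint size that probes successfully raises min,
* one that fails lowers max; the loop stops once the midpoint no
* longer changes)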
*/
min = page_size;
max = INT_MAX;
prev_sz = 0;
sz = 0;
for (i = 0; i < 64; i++) {
sz = min + (max - min) / 2;
if (prev_sz == sz)
return sz;
prev_sz = sz;
if (stress_check_max_pipe_size(sz, page_size) == 0) {
min = sz;
} else {
max = sz;
}
}
ret:
max_pipe_size = sz;
#else
max_pipe_size = stress_get_page_size();
#endif
return max_pipe_size;
}
/*
* stress_align_address
* align address to alignment, alignment MUST be a power of 2
*/
void *stress_align_address(const void *addr, const size_t alignment)
{
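/*
* e.g. with alignment 64: 0x1003 -> (0x1003 + 64) & ~63 = 0x1040;
* note that an already aligned address is also bumped up by one
* full alignment step
*/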
const uintptr_t uintptr =
((uintptr_t)addr + alignment) & ~(alignment - 1);
return (void *)uintptr;
}
/*
* stress_sigalrm_pending()
* return true if SIGALRM is pending
*/
bool stress_sigalrm_pending(void)
{
sigset_t set;
(void)sigemptyset(&set);
(void)sigpending(&set);
return sigismember(&set, SIGALRM);
}
/*
* stress_uint64_to_str()
* turn 64 bit size to human readable string
*/
char *stress_uint64_to_str(char *str, size_t len, const uint64_t val)
{
typedef struct {
uint64_t size;
char *suffix;
} stress_size_info_t;
static const stress_size_info_t size_info[] = {
{ EB, "E" },
{ PB, "P" },
{ TB, "T" },
{ GB, "G" },
{ MB, "M" },
{ KB, "K" },
};
size_t i;
char *suffix = "";
uint64_t scale = 1;
for (i = 0; i < SIZEOF_ARRAY(size_info); i++) {
uint64_t scaled = val / size_info[i].size;
if ((scaled >= 1) && (scaled < 1024)) {
suffix = size_info[i].suffix;
scale = size_info[i].size;
break;
}
}
(void)snprintf(str, len, "%.1f%s", (double)val / (double)scale, suffix);
return str;
}
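/*
* Example (assuming the usual binary KB/MB/GB definitions in
* stress-ng.h): 1572864 scales by MB and is rendered as "1.5M",
* while values below 1K fall through with scale 1 and no suffix.
*/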
/*
* stress_check_root()
* returns true if root
*/
static inline bool stress_check_root(void)
{
return (geteuid() == 0);
}
#if defined(HAVE_SYS_CAPABILITY_H)
/*
* stress_check_capability()
* returns true if process has the given capability,
* if capability is SHIM_CAP_IS_ROOT then just check if process is
* root.
*/
bool stress_check_capability(const int capability)
{
int ret;
struct __user_cap_header_struct uch;
struct __user_cap_data_struct ucd[_LINUX_CAPABILITY_U32S_3];
uint32_t mask;
size_t idx;
if (capability == SHIM_CAP_IS_ROOT)
return stress_check_root();
(void)memset(&uch, 0, sizeof uch);
(void)memset(ucd, 0, sizeof ucd);
uch.version = _LINUX_CAPABILITY_VERSION_3;
uch.pid = getpid();
ret = capget(&uch, ucd);
if (ret < 0)
return stress_check_root();
idx = (size_t)CAP_TO_INDEX(capability);
mask = CAP_TO_MASK(capability);
return (ucd[idx].permitted &= mask) ? true : false;
}
#else
bool stress_check_capability(const int capability)
{
(void)capability;
return stress_check_root();
}
#endif
#if defined(HAVE_SYS_CAPABILITY_H)
int stress_drop_capabilities(const char *name)
{
int ret;
uint32_t i;
struct __user_cap_header_struct uch;
struct __user_cap_data_struct ucd[_LINUX_CAPABILITY_U32S_3];
(void)memset(&uch, 0, sizeof uch);
(void)memset(ucd, 0, sizeof ucd);
uch.version = _LINUX_CAPABILITY_VERSION_3;
uch.pid = getpid();
ret = capget(&uch, ucd);
if (ret < 0) {
pr_fail("%s: capget on pid %d failed: errno=%d (%s)\n",
name, uch.pid, errno, strerror(errno));
return -1;
}
/*
* We could just memset ucd to zero, but
* lets explicitly set all the capability
* bits to zero to show the intent
*/
for (i = 0; i <= CAP_LAST_CAP; i++) {
uint32_t idx = CAP_TO_INDEX(i);
uint32_t mask = CAP_TO_MASK(i);
ucd[idx].inheritable &= ~mask;
ucd[idx].permitted &= ~mask;
ucd[idx].effective &= ~mask;
}
ret = capset(&uch, ucd);
if (ret < 0) {
pr_fail("%s: capset on pid %d failed: errno=%d (%s)\n",
name, uch.pid, errno, strerror(errno));
return -1;
}
#if defined(HAVE_PRCTL) && \
defined(HAVE_SYS_PRCTL_H) && \
defined(PR_SET_NO_NEW_PRIVS)
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
if (ret < 0) {
pr_inf("%s: prctl PR_SET_NO_NEW_PRIVS on pid %d failed: "
"errno=%d (%s)\n",
name, uch.pid, errno, strerror(errno));
return -1;
}
#endif
return 0;
}
#else
int stress_drop_capabilities(const char *name)
{
(void)name;
return 0;
}
#endif
/*
* stress_is_dot_filename()
* is filename "." or ".."
*/
bool stress_is_dot_filename(const char *name)
{
if (!strcmp(name, "."))
return true;
if (!strcmp(name, ".."))
return true;
return false;
}
/*
* stress_const_optdup(const char *opt)
* duplicate a modifiable copy of a const option string opt
*/
char *stress_const_optdup(const char *opt)
{
char *str = strdup(opt);
if (!str)
(void)fprintf(stderr, "out of memory duplicating option '%s'\n", opt);
return str;
}
/*
* stress_text_addr()
* return length and start/end addresses of text segment
*/
size_t stress_text_addr(char **start, char **end)
{
#if defined(HAVE_EXECUTABLE_START)
extern char __executable_start;
intptr_t text_start = (intptr_t)&__executable_start;
#elif defined(__APPLE__)
extern char _mh_execute_header;
intptr_t text_start = (intptr_t)&_mh_execute_header;
#elif defined(__OpenBSD__)
extern char _start[];
intptr_t text_start = (intptr_t)&_start[0];
#elif defined(__TINYC__)
extern char _start;
intptr_t text_start = (intptr_t)&_start;
#else
extern char _start;
intptr_t text_start = (intptr_t)&_start;
#endif
#if defined(__APPLE__)
extern void *get_etext(void);
intptr_t text_end = (intptr_t)get_etext();
#elif defined(__TINYC__)
extern char _etext;
intptr_t text_end = (intptr_t)&_etext;
#else
extern char etext;
intptr_t text_end = (intptr_t)&etext;
#endif
const size_t text_len = (size_t)(text_end - text_start);
if ((start == NULL) || (end == NULL) || (text_start >= text_end))
return 0;
*start = (char *)text_start;
*end = (char *)text_end;
return text_len;
}
/*
* stress_is_dev_tty()
* return true if fd is on a /dev/ttyN device. If it can't
* be determined then default to assuming it is.
*/
bool stress_is_dev_tty(const int fd)
{
#if defined(HAVE_TTYNAME)
const char *name = ttyname(fd);
if (!name)
return true;
return !strncmp("/dev/tty", name, 8);
#else
UNEXPECTED
(void)fd;
/* Assume it is */
return true;
#endif
}
/*
* stress_dirent_list_free()
* free dirent list
*/
void stress_dirent_list_free(struct dirent **dlist, const int n)
{
if (dlist) {
int i;
for (i = 0; i < n; i++) {
if (dlist[i])
free(dlist[i]);
}
free(dlist);
}
}
/*
* stress_dirent_list_prune()
* remove . and .. files from directory list
*/
int stress_dirent_list_prune(struct dirent **dlist, const int n)
{
int i, j;
for (i = 0, j = 0; i < n; i++) {
if (dlist[i]) {
if (stress_is_dot_filename(dlist[i]->d_name)) {
free(dlist[i]);
dlist[i] = NULL;
} else {
dlist[j] = dlist[i];
j++;
}
}
}
return j;
}
/*
* stress_warn_once_hash()
* computes a hash for a filename and a line and stores it,
* returns true if this is the first time this has been
* called for that specific filename and line
*
* Without libpthread this is potentially racy.
*/
bool stress_warn_once_hash(const char *filename, const int line)
{
uint32_t free_slot, i, j, h = (stress_hash_pjw(filename) + (uint32_t)line);
bool not_warned_yet = true;
#if defined(HAVE_LIB_PTHREAD)
int ret;
#endif
if (!g_shared)
return true;
#if defined(HAVE_LIB_PTHREAD)
ret = shim_pthread_spin_lock(&g_shared->warn_once.lock);
#endif
free_slot = STRESS_WARN_HASH_MAX;
/*
* Ensure hash is never zero so that it does not
* match an empty slot value of zero
*/
if (h == 0)
h += STRESS_WARN_HASH_MAX;
j = h % STRESS_WARN_HASH_MAX;
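/*
* open addressing with linear probing: a slot holding h means we
* have warned for this file/line before; the first empty slot seen
* is remembered so a new hash can be recorded there
*/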
for (i = 0; i < STRESS_WARN_HASH_MAX; i++) {
if (g_shared->warn_once.hash[j] == h) {
not_warned_yet = false;
goto unlock;
}
if ((free_slot == STRESS_WARN_HASH_MAX) &&
(g_shared->warn_once.hash[j] == 0)) {
free_slot = j;
}
j = (j + 1) % STRESS_WARN_HASH_MAX;
}
if (free_slot != STRESS_WARN_HASH_MAX) {
g_shared->warn_once.hash[free_slot] = h;
}
unlock:
#if defined(HAVE_LIB_PTHREAD)
if (!ret)
shim_pthread_spin_unlock(&g_shared->warn_once.lock);
#endif
return not_warned_yet;
}
/*
* stress_ipv4_checksum()
* ipv4 data checksum
*/
uint16_t HOT OPTIMIZE3 stress_ipv4_checksum(uint16_t *ptr, const size_t sz)
{
register uint32_t sum = 0;
register size_t n = sz;
while (n > 1) {
sum += *ptr++;
n -= 2;
}
if (n)
sum += *(uint8_t*)ptr;
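/* fold the 32 bit accumulator back into 16 bits (end-around carry) before returning the one's complement */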
sum = (sum >> 16) + (sum & 0xffff);
sum += (sum >> 16);
return (uint16_t)~sum;
}
#if defined(HAVE_SETPWENT) && \
defined(HAVE_GETPWENT) && \
defined(HAVE_ENDPWENT) && \
!defined(BUILD_STATIC)
static int stress_uid_comp(const void *p1, const void *p2)
{
const uid_t *uid1 = (const uid_t *)p1;
const uid_t *uid2 = (const uid_t *)p2;
if (*uid1 > *uid2)
return 1;
else if (*uid1 < *uid2)
return -1;
else
return 0;
}
/*
* stress_get_unused_uid()
* find a free unused UID greater than 250; returns -1 and sets
* uid to 0 if none can be found, otherwise returns 0 and sets
* uid to the free uid.
*
* This also caches the uid so this can be called
* frequently. If the cached uid is in use it will
* perform the expensive lookup again.
*/
int stress_get_unused_uid(uid_t *uid)
{
static uid_t cached_uid = 0;
uid_t *uids;
*uid = 0;
/*
* If we have a cached unused uid and it's no longer
* unused then force a rescan for a new one
*/
if ((cached_uid != 0) && (getpwuid(cached_uid) != NULL))
cached_uid = 0;
if (cached_uid == 0) {
struct passwd *pw;
size_t i, n;
setpwent();
for (n = 0; getpwent() != NULL; n++) {
}
endpwent();
uids = calloc(n, sizeof(*uids));
if (!uids)
return -1;
setpwent();
for (i = 0; i < n && (pw = getpwent()) != NULL; i++) {
uids[i] = pw->pw_uid;
}
endpwent();
n = i;
qsort(uids, n, sizeof(*uids), stress_uid_comp);
/* Look for a suitable gap from uid 250 upwards */
for (i = 0; i < n - 1; i++) {
/*
* Add a large gap in case new uids
* are added to reduce free uid race window
*/
const uid_t uid_try = uids[i] + 250;
if (uids[i + 1] > uid_try) {
if (getpwuid(uid_try) == NULL) {
cached_uid = uid_try;
break;
}
}
}
free(uids);
}
/*
* Not found?
*/
if (cached_uid == 0)
return -1;
*uid = cached_uid;
return 0;
}
#else
int stress_get_unused_uid(uid_t *uid)
{
*uid = 0;
return -1;
}
#endif
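/*
* Illustrative usage sketch (not part of the original source): fetch a
* currently unused uid, e.g. to exercise a call with a non-existent user;
* the fchown() use and fd shown here are hypothetical.
*
*	uid_t uid;
*
*	if (stress_get_unused_uid(&uid) == 0)
*		(void)fchown(fd, uid, (gid_t)-1);
*/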
/*
* stress_read_buffer()
* Unlike a plain read(), this function ensures that all requested bytes
* have been read. It can also optionally ignore EINTR, which may occur
* when alarm() fires in the parent process.
*/
ssize_t stress_read_buffer(int fd, void* buffer, ssize_t size, bool ignore_int)
{
ssize_t rbytes = 0, ret;
do {
char *ptr = ((char *)buffer) + rbytes;
ignore_eintr:
ret = read(fd, (void *)ptr, (size_t)(size - rbytes));
if (ignore_int && (ret < 0) && (errno == EINTR))
goto ignore_eintr;
if (ret > 0)
rbytes += ret;
} while (ret > 0 && (rbytes != size));
return (ret <= 0)? ret : rbytes;
}
/*
* stress_write_buffer()
* Unlike a plain write(), this function ensures that all bytes have been
* written. It can also optionally ignore EINTR, which may occur when
* alarm() fires in the parent process.
*/
ssize_t stress_write_buffer(int fd, void* buffer, ssize_t size, bool ignore_int)
{
ssize_t wbytes = 0, ret;
do {
char *ptr = ((char *)buffer) + wbytes;
ignore_eintr:
ret = write(fd, (void *)ptr, (size_t)(size - wbytes));
/* retry if interrupted */
if (ignore_int && (ret < 0) && (errno == EINTR))
goto ignore_eintr;
if (ret > 0)
wbytes += ret;
} while (ret > 0 && (wbytes != size));
return (ret <= 0)? ret : wbytes;
}
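/*
* Illustrative usage sketch (not part of the original source): write a
* whole buffer to fd_out and read a whole buffer from fd_in, transparently
* retrying on EINTR (e.g. when alarm() fires in the parent); the file
* descriptors and buffer contents are assumed to be set up by the caller.
*
*	char data[4096];
*	ssize_t ret;
*
*	ret = stress_write_buffer(fd_out, data, (ssize_t)sizeof(data), true);
*	if (ret != (ssize_t)sizeof(data))
*		... short write or error ...
*	ret = stress_read_buffer(fd_in, data, (ssize_t)sizeof(data), true);
*	if (ret != (ssize_t)sizeof(data))
*		... short read or error ...
*/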
/*
* stress_kernel_release()
* turn release major.minor.patchlevel triplet into base 100 value
*/
int stress_kernel_release(const int major, const int minor, const int patchlevel)
{
return (major * 10000) + (minor * 100) + patchlevel;
}
/*
* stress_get_kernel_release()
* return kernel release number in base 100, e.g.
* 4.15.2 -> 41502, returns -1 on failure.
*/
int stress_get_kernel_release(void)
{
#if defined(HAVE_UNAME)
struct utsname buf;
int major = 0, minor = 0, patchlevel = 0;
if (uname(&buf) < 0)
return -1;
if (sscanf(buf.release, "%d.%d.%d\n", &major, &minor, &patchlevel) < 1)
return -1;
return stress_kernel_release(major, minor, patchlevel);
#else
UNEXPECTED
return -1;
#endif
}
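/*
* Illustrative usage sketch (not part of the original source): skip a
* feature on kernels older than 4.15.0. Note that a failed release
* lookup returns -1, so the feature is also skipped in that case.
*
*	if (stress_get_kernel_release() < stress_kernel_release(4, 15, 0))
*		... skip the feature ...
*/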
/*
* stress_get_unused_pid_racy()
* try to find an unused pid. This is racy and may actually
* return a pid that is unused at test time but will become
* used by the time the pid is accessed.
*/
pid_t stress_get_unused_pid_racy(const bool fork_test)
{
char buf[64];
#if defined(PID_MAX_LIMIT)
pid_t max_pid = PID_MAX_LIMIT;
#elif defined(PID_MAX)
pid_t max_pid = PID_MAX;
#elif defined(PID_MAX_DEFAULT)
pid_t max_pid = PID_MAX_DEFAULT;
#else
pid_t max_pid = 32767;
#endif
int i;
pid_t pid;
uint32_t n;
(void)memset(buf, 0, sizeof(buf));
if (system_read("/proc/sys/kernel/pid_max", buf, sizeof(buf) - 1) > 0) {
max_pid = atoi(buf);
}
if (max_pid < 1024)
max_pid = 1024;
/*
* Create a child, terminate it, use this pid as an unused
* pid. Slow, but should be OK if the system doesn't recycle PIDs
* quickly.
*/
if (fork_test) {
pid = fork();
if (pid == 0) {
_exit(0);
} else if (pid > 0) {
int status, ret;
ret = waitpid(pid, &status, 0);
if ((ret == pid) &&
((kill(pid, 0) < 0) && (errno == ESRCH))) {
return pid;
}
}
}
/*
* Make a random PID guess.
*/
n = (uint32_t)max_pid - 1023;
for (i = 0; i < 20; i++) {
pid = (pid_t)(stress_mwc32() % n) + 1023;
if ((kill(pid, 0) < 0) && (errno == ESRCH))
return pid;
}
/*
* Give up.
*/
return max_pid;
}
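/*
* Illustrative usage sketch (not part of the original source): probe a
* pid that is very likely unused; the result is inherently racy, so a
* caller can only rely on "probably unused" semantics.
*
*	const pid_t unused_pid = stress_get_unused_pid_racy(true);
*
*	if ((kill(unused_pid, 0) < 0) && (errno == ESRCH))
*		... pid was still unused at the time of the check ...
*/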
/*
* stress_read_fdinfo()
* read the fdinfo for a specific pid's fd, Linux only
*/
int stress_read_fdinfo(const pid_t pid, const int fd)
{
#if defined(__linux__)
char path[PATH_MAX];
char buf[4096];
(void)snprintf(path, sizeof(path), "/proc/%d/fdinfo/%d",
(int)pid, fd);
return (int)system_read(path, buf, sizeof(buf));
#else
(void)pid;
(void)fd;
return 0;
#endif
}
/*
* stress_hostname_length()
* return the maximum allowed hostname length
*/
size_t stress_hostname_length(void)
{
#if defined(HOST_NAME_MAX)
return HOST_NAME_MAX + 1;
#elif defined(HAVE_UNAME) && \
defined(HAVE_SYS_UTSNAME_H)
struct utsname uts;
return sizeof(uts.nodename); /* Linux */
#else
return 255 + 1; /* SUSv2 */
#endif
}
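/*
* Illustrative usage sketch (not part of the original source): size a
* buffer for gethostname() using stress_hostname_length().
*
*	const size_t len = stress_hostname_length();
*	char *hostname = malloc(len);
*
*	if (hostname && (gethostname(hostname, len) == 0))
*		... use hostname ...
*	free(hostname);
*/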
/*
* stress_min_aux_sig_stack_size()
* For ARM we should check AT_MINSIGSTKSZ as this
* also includes SVE register saving overhead
* https://blog.linuxplumbersconf.org/2017/ocw/system/presentations/4671/original/plumbers-dm-2017.pdf
*/
static inline long stress_min_aux_sig_stack_size(void)
{
#if defined(HAVE_SYS_AUXV_H) && \
defined(HAVE_GETAUXVAL) && \
defined(AT_MINSIGSTKSZ)
long sz = getauxval(AT_MINSIGSTKSZ);
if (sz > 0)
return sz;
#else
UNEXPECTED
#endif
return -1;
}
/*
* stress_sig_stack_size()
* wrapper for STRESS_SIGSTKSZ, try and find
* stack size required
*/
size_t stress_sig_stack_size(void)
{
static long sz = -1, min;
/* return cached copy */
if (sz > 0)
return sz;
min = stress_min_aux_sig_stack_size();
#if defined(_SC_SIGSTKSZ)
sz = sysconf(_SC_SIGSTKSZ);
if (min < sz)
min = sz;
#endif
#if defined(SIGSTKSZ)
if (SIGSTKSZ > min) {
/* SIGSTKSZ may be sysconf(_SC_SIGSTKSZ) */
min = SIGSTKSZ;
}
#endif
if (min < STRESS_ABS_MIN_STACK_SIZE)
min = STRESS_ABS_MIN_STACK_SIZE;
sz = min;
return (size_t)sz;
}
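/*
* Illustrative usage sketch (not part of the original source): install an
* alternative signal stack sized via stress_sig_stack_size(); allocation
* failure handling is minimal.
*
*	stack_t ss;
*
*	ss.ss_size = stress_sig_stack_size();
*	ss.ss_sp = calloc(1, ss.ss_size);
*	ss.ss_flags = 0;
*	if (ss.ss_sp)
*		(void)sigaltstack(&ss, NULL);
*/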
/*
* stress_min_sig_stack_size()
* wrapper for STRESS_MINSIGSTKSZ
*/
size_t stress_min_sig_stack_size(void)
{
static long sz = -1, min;
/* return cached copy */
if (sz > 0)
return sz;
min = stress_min_aux_sig_stack_size();
#if defined(_SC_MINSIGSTKSZ)
sz = sysconf(_SC_MINSIGSTKSZ);
if (min < sz)
min = sz;
#endif
#if defined(SIGSTKSZ)
if (SIGSTKSZ > min) {
/* SIGSTKSZ may be sysconf(_SC_SIGSTKSZ) */
min = SIGSTKSZ;
}
#endif
if (min < STRESS_ABS_MIN_STACK_SIZE)
min = STRESS_ABS_MIN_STACK_SIZE;
sz = min;
return (size_t)sz;
}
/*
* stress_min_pthread_stack_size()
* return the minimum size of stack for a pthread
*/
size_t stress_min_pthread_stack_size(void)
{
static long sz = -1, min;
/* return cached copy */
if (sz > 0)
return sz;
min = stress_min_aux_sig_stack_size();
#if defined(__SC_THREAD_STACK_MIN_VALUE)
sz = sysconf(__SC_THREAD_STACK_MIN_VALUE);
if (sz > min)
min = sz;
#endif
#if defined(_SC_THREAD_STACK_MIN_VALUE)
sz = sysconf(_SC_THREAD_STACK_MIN_VALUE);
if (sz > min)
min = sz;
#endif
#if defined(PTHREAD_STACK_MIN)
if (PTHREAD_STACK_MIN > min)
min = PTHREAD_STACK_MIN;
#endif
if (8192 > min)
min = 8192;
sz = min;
return (size_t)sz;
}
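/*
* Illustrative usage sketch (not part of the original source): create a
* pthread with the minimum viable stack size; thread_func is a hypothetical
* worker, and on some systems the size may need rounding up to a page
* multiple, which is omitted here.
*
*	pthread_t tid;
*	pthread_attr_t attr;
*
*	(void)pthread_attr_init(&attr);
*	(void)pthread_attr_setstacksize(&attr, stress_min_pthread_stack_size());
*	(void)pthread_create(&tid, &attr, thread_func, NULL);
*	(void)pthread_attr_destroy(&attr);
*/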
/*
* stress_sig_handler_exit()
* signal handler that exits a process via _exit(0) for
* immediate dead stop termination.
*/
void NORETURN MLOCKED_TEXT stress_sig_handler_exit(int signum)
{
(void)signum;
_exit(0);
}
/*
* __stack_chk_fail()
* override stack smashing callback
*/
#if (defined(__GNUC__) || defined(__clang__)) && \
defined(HAVE_WEAK_ATTRIBUTE)
extern void __stack_chk_fail(void);
NORETURN WEAK void __stack_chk_fail(void)
{
if (stress_stack_check_flag) {
(void)fprintf(stderr, "Stack overflow detected! Aborting stress-ng.\n");
(void)fflush(stderr);
abort();
}
/* silently exit */
_exit(0);
}
#endif
/*
* stress_set_stack_smash_check_flag()
* set flag, true = report flag, false = silently ignore
*/
void stress_set_stack_smash_check_flag(const bool flag)
{
stress_stack_check_flag = flag;
}
/*
* stress_tty_width()
* get the width of the controlling tty in columns,
* defaulting to 80 if it cannot be determined
*/
int stress_tty_width(void)
{
const int max_width = 80;
#if defined(HAVE_WINSIZE) && \
defined(TIOCGWINSZ)
struct winsize ws;
int ret;
ret = ioctl(fileno(stdout), TIOCGWINSZ, &ws);
if (ret < 0)
return max_width;
ret = (int)ws.ws_col;
if ((ret < 0) || (ret > 1024))
return max_width;
return ret;
#else
UNEXPECTED
return max_width;
#endif
}
/*
* stress_get_extents()
* try to determine number extents in a file
*/
size_t stress_get_extents(const int fd)
{
#if defined(FS_IOC_FIEMAP) && \
defined(HAVE_LINUX_FIEMAP_H)
struct fiemap fiemap;
(void)memset(&fiemap, 0, sizeof(fiemap));
fiemap.fm_length = ~0UL;
/* Find out how many extents there are */
if (ioctl(fd, FS_IOC_FIEMAP, &fiemap) < 0)
return 0;
return fiemap.fm_mapped_extents;
#else
UNEXPECTED
(void)fd;
return 0;
#endif
}
/*
* stress_redo_fork()
* check fork errno (in err) and return true if
* an immediate fork can be retried due to known
* error cases that are retryable. Also force a
* scheduling yield.
*/
bool stress_redo_fork(const int err)
{
if (keep_stressing_flag() &&
((err == EAGAIN) || (err == EINTR) || (err == ENOMEM))) {
(void)shim_sched_yield();
return true;
}
return false;
}
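/*
* Illustrative usage sketch (not part of the original source): a typical
* fork-and-retry loop; the failure path shown is hypothetical.
*
* again:
*	pid = fork();
*	if (pid < 0) {
*		if (stress_redo_fork(errno))
*			goto again;
*		return -1;
*	} else if (pid == 0) {
*		... child work ...
*		_exit(0);
*	}
*	... parent: wait for the child ...
*/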
/*
* stress_sighandler_nop()
* no-operation signal handler
*/
void stress_sighandler_nop(int sig)
{
(void)sig;
}
/*
* stress_clear_warn_once()
* clear the Linux warn-once flag so that kernel warn-once
* messages can be re-issued
*/
void stress_clear_warn_once(void)
{
#if defined(__linux__)
if (stress_check_capability(SHIM_CAP_IS_ROOT))
(void)system_write("/sys/kernel/debug/clear_warn_once", "1", 1);
#endif
}