// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#include "unf_log.h"
#include "unf_common.h"
#include "unf_exchg.h"
#include "unf_rport.h"
#include "unf_service.h"
#include "unf_io.h"
#define UNF_DEL_XCHG_TIMER_SAFE(v_xchg) \
do { \
if (cancel_delayed_work(&((v_xchg)->timeout_work))) { \
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, \
UNF_MAJOR, \
"Exchange(0x%p) is free, but timer is pending.", \
v_xchg); \
} else { \
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, \
UNF_CRITICAL, \
"Exchange(0x%p) is free, but timer is running.", \
v_xchg); \
} \
} while (0)
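/* Note: cancel_delayed_work() returns true only if it dequeued work that
 * was still pending, so the branches above merely log whether the timer
 * was idle or already executing when the exchange was freed; neither
 * branch changes the reference count.
 */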
#define UNF_XCHG_IS_ELS_REPLY(v_xchg) \
((((v_xchg)->cmnd_code & 0x0ffff) == ELS_ACC) || \
(((v_xchg)->cmnd_code & 0x0ffff) == ELS_RJT))
static struct unf_ioflow_id_s io_stage[] = {
{ "XCHG_ALLOC" },
{ "TGT_RECEIVE_ABTS" },
{ "TGT_ABTS_DONE" },
{ "TGT_IO_SRR" },
{ "SFS_RESPONSE" },
{ "SFS_TIMEOUT" },
{ "INI_SEND_CMND" },
{ "INI_RESPONSE_DONE" },
{ "INI_EH_ABORT" },
{ "INI_EH_DEVICE_RESET" },
{ "INI_EH_BLS_DONE" },
{ "INI_IO_TIMEOUT" },
{ "INI_REQ_TIMEOUT" },
{ "XCHG_CANCEL_TIMER" },
{ "XCHG_FREE_XCHG" },
{ "SEND_ELS" },
{ "IO_XCHG_WAIT" },
};
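/* These stage names feed the I/O-flow tracing; their order presumably
 * mirrors the io-stage index values used elsewhere in the driver (e.g.
 * XCHG_CANCEL_TIMER passed to unf_xchg_ref_dec() below), so entries
 * must not be reordered.
 */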
void unf_wakeup_scsi_task_cmnd(struct unf_lport_s *v_lport)
{
struct list_head *node = NULL;
struct list_head *next_node = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned long hot_pool_lock_flags = 0;
unsigned long xchg_flag = 0;
struct unf_xchg_mgr_s *xchg_mgr = NULL;
unsigned int i = 0;
UNF_CHECK_VALID(0x850, UNF_TRUE, v_lport, return);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
xchg_mgr = unf_get_xchg_mgr_by_lport(v_lport, i);
if (!xchg_mgr) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO,
UNF_LOG_EVENT, UNF_MINOR,
"Can't find LPort(0x%x) MgrIdx %u exchange manager.",
v_lport->port_id, i);
continue;
}
spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
list_for_each_safe(node, next_node,
&xchg_mgr->hot_pool->ini_busylist) {
xchg = list_entry(node, struct unf_xchg_s,
list_xchg_entry);
spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag);
if (INI_IO_STATE_UPTASK & xchg->io_state &&
(atomic_read(&xchg->ref_cnt) > 0)) {
UNF_SET_SCSI_CMND_RESULT(xchg, UNF_IO_SUCCESS);
up(&xchg->task_sema);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO,
UNF_LOG_EVENT, UNF_MINOR,
"Wake up task command exchange(0x%p), Hot Pool Tag(0x%x).",
xchg, xchg->hot_pool_tag);
}
spin_unlock_irqrestore(&xchg->xchg_state_lock,
xchg_flag);
}
spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
}
}
void unf_cm_xchg_mgr_abort_io_by_id(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
unsigned int v_sid, unsigned int v_did,
unsigned int v_extra_io_state)
{
/*
* for target session: set ABORT
* 1. R_Port remove
* 2. Send PLOGI_ACC callback
* 3. RCVD PLOGI
* 4. RCVD LOGO
*/
UNF_CHECK_VALID(0x852, UNF_TRUE, v_lport, return);
if (v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_io_xchg_abort) {
/* The Xchg's SID/DID are swapped between phases, so
 * the abort must be issued for both orientations.
 */
v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_io_xchg_abort(
v_lport,
v_rport,
v_sid, v_did,
v_extra_io_state);
v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_io_xchg_abort(
v_lport, v_rport,
v_did, v_sid,
v_extra_io_state);
}
}
void unf_cm_xchg_mgr_abort_sfs_by_id(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
unsigned int v_sid, unsigned int v_did)
{
UNF_CHECK_VALID(0x990, UNF_TRUE, v_lport, return);
if (v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_sfs_xchg_abort) {
/* The Xchg's SID/DID are swapped between phases, so
 * the abort must be issued for both orientations.
 */
v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_sfs_xchg_abort(v_lport,
v_rport,
v_sid,
v_did);
v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_sfs_xchg_abort(v_lport,
v_rport,
v_did,
v_sid);
}
}
void unf_cm_xchg_abort_by_lun(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
unsigned long long v_lun_id,
void *v_tm_xchg,
int v_abort_all_lun_flag)
{
/*
* LUN Reset: set UP_ABORT tag, with:
* INI_Busy_list, IO_Wait_list,
* IO_Delay_list, IO_Delay_transfer_list
*/
void (*unf_xchg_abort_by_lun)(void*, void*, unsigned long long,
void*, int) = NULL;
UNF_CHECK_VALID(0x853, UNF_TRUE, v_lport, return);
unf_xchg_abort_by_lun =
v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_by_lun;
if (unf_xchg_abort_by_lun) {
unf_xchg_abort_by_lun((void *)v_lport, (void *)v_rport,
v_lun_id, v_tm_xchg,
v_abort_all_lun_flag);
}
}
void unf_cm_xchg_abort_by_session(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport)
{
void (*pfn_unf_xchg_abort_by_session)(void*, void*) = NULL;
UNF_CHECK_VALID(0x853, UNF_TRUE, v_lport, return);
pfn_unf_xchg_abort_by_session =
v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_by_session;
if (pfn_unf_xchg_abort_by_session) {
pfn_unf_xchg_abort_by_session((void *)v_lport,
(void *)v_rport);
}
}
void *unf_cm_get_free_xchg(void *v_lport, unsigned int v_xchg_type)
{
struct unf_lport_s *lport = NULL;
struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL;
UNF_CHECK_VALID(0x855, UNF_TRUE, unlikely(v_lport), return NULL);
lport = (struct unf_lport_s *)v_lport;
xch_mgr_temp = &lport->xchg_mgr_temp;
/* Find the corresponding Lport Xchg management template. */
UNF_CHECK_VALID(0x856, UNF_TRUE,
unlikely(xch_mgr_temp->pfn_unf_xchg_get_free_and_init),
return NULL);
return xch_mgr_temp->pfn_unf_xchg_get_free_and_init(lport, v_xchg_type,
INVALID_VALUE16);
}
void unf_cm_free_xchg(void *v_lport, void *v_xchg)
{
struct unf_lport_s *lport = NULL;
struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL;
UNF_CHECK_VALID(0x857, UNF_TRUE, unlikely(v_lport), return);
UNF_CHECK_VALID(0x858, UNF_TRUE, unlikely(v_xchg), return);
lport = (struct unf_lport_s *)v_lport;
xch_mgr_temp = &lport->xchg_mgr_temp;
UNF_CHECK_VALID(0x859, UNF_TRUE,
unlikely(xch_mgr_temp->pfn_unf_xchg_release),
return);
/*
* unf_cm_free_xchg --->>> unf_free_xchg
* --->>> unf_xchg_ref_dec --->>> unf_free_fcp_xchg
* --->>> unf_done_ini_xchg
*/
xch_mgr_temp->pfn_unf_xchg_release(v_lport, v_xchg);
}
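/* The unf_cm_* wrappers above and below only dispatch through the
 * per-port template. A minimal sketch of how a low-level driver might
 * populate that template during port bring-up (the hifc_* callback
 * names are hypothetical, for illustration only):
 *
 *	lport->xchg_mgr_temp.pfn_unf_xchg_get_free_and_init =
 *			hifc_xchg_get_free_and_init;
 *	lport->xchg_mgr_temp.pfn_unf_xchg_release = hifc_xchg_release;
 *	lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag =
 *			hifc_look_up_xchg_by_tag;
 */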
void *unf_cm_lookup_xchg_by_tag(void *v_lport, unsigned short v_hot_pool_tag)
{
struct unf_lport_s *lport = NULL;
struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL;
UNF_CHECK_VALID(0x860, UNF_TRUE, unlikely(v_lport), return NULL);
/* Find the corresponding Lport Xchg management template */
lport = (struct unf_lport_s *)v_lport;
xch_mgr_temp = &lport->xchg_mgr_temp;
UNF_CHECK_VALID(0x861, UNF_TRUE,
unlikely(xch_mgr_temp->pfn_unf_look_up_xchg_by_tag),
return NULL);
return xch_mgr_temp->pfn_unf_look_up_xchg_by_tag(v_lport,
v_hot_pool_tag);
}
void *unf_cm_lookup_xchg_by_id(void *v_lport, unsigned short v_ox_id,
unsigned int v_oid)
{
struct unf_lport_s *lport = NULL;
struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL;
UNF_CHECK_VALID(0x862, UNF_TRUE, unlikely(v_lport), return NULL);
lport = (struct unf_lport_s *)v_lport;
xch_mgr_temp = &lport->xchg_mgr_temp;
/* Find the corresponding Lport Xchg management template */
UNF_CHECK_VALID(0x863, UNF_TRUE,
unlikely(xch_mgr_temp->pfn_unf_look_up_xchg_by_id),
return NULL);
return xch_mgr_temp->pfn_unf_look_up_xchg_by_id(v_lport, v_ox_id,
v_oid);
}
struct unf_xchg_s *unf_cm_lookup_xchg_by_cmnd_sn(
void *v_lport,
unsigned long long v_command_sn,
unsigned int v_world_id)
{
struct unf_lport_s *lport = NULL;
struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL;
struct unf_xchg_s *xchg = NULL;
UNF_CHECK_VALID(0x864, UNF_TRUE, unlikely(v_lport), return NULL);
lport = (struct unf_lport_s *)v_lport;
xch_mgr_temp = &lport->xchg_mgr_temp;
UNF_CHECK_VALID(
0x865, UNF_TRUE,
unlikely(xch_mgr_temp->pfn_unf_look_up_xchg_by_cmnd_sn),
return NULL);
xchg =
(struct unf_xchg_s *)xch_mgr_temp->pfn_unf_look_up_xchg_by_cmnd_sn(
lport, v_command_sn,
v_world_id);
return xchg;
}
static void unf_free_all_rsp_pages(struct unf_xchg_mgr_s *v_xchg_mgr)
{
unsigned int buff_index;
UNF_CHECK_VALID(0x868, UNF_TRUE, v_xchg_mgr, return);
if (v_xchg_mgr->rsp_buf_list.buflist) {
for (buff_index = 0; buff_index <
v_xchg_mgr->rsp_buf_list.buf_num;
buff_index++) {
if (v_xchg_mgr->rsp_buf_list.buflist[buff_index].vaddr) {
dma_free_coherent(
&v_xchg_mgr->hot_pool->lport->low_level_func.dev->dev,
v_xchg_mgr->rsp_buf_list.buf_size,
v_xchg_mgr->rsp_buf_list.buflist[buff_index].vaddr,
v_xchg_mgr->rsp_buf_list.buflist[buff_index].paddr);
v_xchg_mgr->rsp_buf_list.buflist[buff_index].vaddr = NULL;
}
}
kfree(v_xchg_mgr->rsp_buf_list.buflist);
v_xchg_mgr->rsp_buf_list.buflist = NULL;
}
}
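/* unf_init_xchg() carves the pre-allocated exchange array into two
 * pools: the first v_sfs_sum exchanges each take one union unf_sfs_u
 * from the coherent SFS area, and the remaining (v_xchg_sum - v_sfs_sum)
 * I/O exchanges each take an rsp_iu_size slice of a DMA-coherent RSP IU
 * page.
 */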
static unsigned int unf_init_xchg(struct unf_lport_s *v_lport,
struct unf_xchg_mgr_s *v_xchg_mgr,
unsigned int v_xchg_sum,
unsigned int v_sfs_sum)
{
struct unf_xchg_s *xchg_mem = NULL;
union unf_sfs_u *sfs_mm_start = NULL;
dma_addr_t sfs_dma_addr;
struct unf_xchg_s *xchg = NULL;
struct unf_xchg_free_pool_s *free_pool = NULL;
unsigned int rsp_iu_nums_per_page = 0;
unsigned int rsp_iu_size = 0;
unsigned long flags = 0;
unsigned int xchg_sum = 0;
unsigned int i = 0;
unsigned int rsp_iu_loop = 0;
unsigned int buf_num;
unsigned int buf_size;
unsigned int curbuf_idx = 0;
void *page_addr;
dma_addr_t phy_addr;
UNF_CHECK_VALID(0x871, UNF_TRUE, v_sfs_sum <= v_xchg_sum,
return UNF_RETURN_ERROR);
free_pool = &v_xchg_mgr->free_pool;
xchg_sum = v_xchg_sum;
xchg_mem = v_xchg_mgr->fcp_mm_start;
xchg = xchg_mem;
sfs_mm_start = (union unf_sfs_u *)v_xchg_mgr->sfs_mm_start;
sfs_dma_addr = v_xchg_mgr->sfs_phy_addr;
/* 1. Assign one SFS union buffer to each SFS XCHG and attach
 * the SFS XCHG to the corresponding free list.
 */
free_pool->total_sfs_xchg = 0;
free_pool->sfs_xchg_sum = v_sfs_sum;
for (i = 0; i < v_sfs_sum; i++) {
INIT_LIST_HEAD(&xchg->list_xchg_entry);
INIT_LIST_HEAD(&xchg->list_esgls);
spin_lock_init(&xchg->xchg_state_lock);
sema_init(&xchg->task_sema, 0);
sema_init(&xchg->echo_info.echo_sync_sema, 0);
spin_lock_irqsave(&free_pool->xchg_free_pool_lock, flags);
xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr = sfs_mm_start;
xchg->fcp_sfs_union.sfs_entry.sfs_buff_phy_addr = sfs_dma_addr;
xchg->fcp_sfs_union.sfs_entry.sfs_buff_len =
sizeof(*sfs_mm_start);
list_add_tail(&xchg->list_xchg_entry,
&free_pool->list_sfs_xchg_list);
free_pool->total_sfs_xchg++;
spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags);
sfs_mm_start++;
sfs_dma_addr = sfs_dma_addr + sizeof(union unf_sfs_u);
xchg++;
}
/*
 * 2. Assign RSP IU memory to each I/O XCHG and attach the XCHG
 * to the corresponding free list. Each RSP IU occupies
 * rsp_iu_size bytes.
 */
rsp_iu_size = (UNF_FCPRSP_CTL_LEN + UNF_MAX_RSP_INFO_LEN +
UNF_SCSI_SENSE_DATA_LEN);
buf_size = BUF_LIST_PAGE_SIZE;
if ((xchg_sum - v_sfs_sum) * rsp_iu_size < BUF_LIST_PAGE_SIZE)
buf_size = (xchg_sum - v_sfs_sum) * rsp_iu_size;
rsp_iu_nums_per_page = buf_size / rsp_iu_size;
buf_num = (xchg_sum - v_sfs_sum) % rsp_iu_nums_per_page ?
(xchg_sum - v_sfs_sum) / rsp_iu_nums_per_page + 1 :
(xchg_sum - v_sfs_sum) / rsp_iu_nums_per_page;
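/* i.e. buf_num = DIV_ROUND_UP(xchg_sum - v_sfs_sum, rsp_iu_nums_per_page):
 * one extra page whenever the RSP IUs do not fill the last page exactly.
 */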
v_xchg_mgr->rsp_buf_list.buflist =
(struct buff_list_s *)kmalloc(
buf_num * sizeof(struct buff_list_s),
GFP_KERNEL);
v_xchg_mgr->rsp_buf_list.buf_num = buf_num;
v_xchg_mgr->rsp_buf_list.buf_size = buf_size;
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) buff num 0x%x buff size 0x%x",
v_lport->port_id, buf_num,
v_xchg_mgr->rsp_buf_list.buf_size);
if (!v_xchg_mgr->rsp_buf_list.buflist) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[err]Allocate BigSfs pool buf list failed out of memory");
goto free_buff;
}
memset(v_xchg_mgr->rsp_buf_list.buflist, 0,
buf_num * sizeof(struct buff_list_s));
free_pool->total_fcp_xchg = 0;
for (i = 0, curbuf_idx = 0; curbuf_idx < buf_num; curbuf_idx++) {
page_addr = dma_alloc_coherent(
&v_lport->low_level_func.dev->dev,
v_xchg_mgr->rsp_buf_list.buf_size,
&phy_addr, GFP_KERNEL);
if (!page_addr)
goto free_buff;
memset(page_addr, 0, v_xchg_mgr->rsp_buf_list.buf_size);
v_xchg_mgr->rsp_buf_list.buflist[curbuf_idx].vaddr = page_addr;
v_xchg_mgr->rsp_buf_list.buflist[curbuf_idx].paddr = phy_addr;
for (rsp_iu_loop = 0;
(rsp_iu_loop < rsp_iu_nums_per_page &&
i < xchg_sum - v_sfs_sum); rsp_iu_loop++) {
INIT_LIST_HEAD(&xchg->list_xchg_entry);
INIT_LIST_HEAD(&xchg->list_esgls);
spin_lock_init(&xchg->xchg_state_lock);
sema_init(&xchg->task_sema, 0);
sema_init(&xchg->echo_info.echo_sync_sema, 0);
/* alloc dma buffer for fcp_rsp_iu */
spin_lock_irqsave(&free_pool->xchg_free_pool_lock,
flags);
xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu =
(struct unf_fcprsp_iu_s *)page_addr;
xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu_phy_addr =
phy_addr;
list_add_tail(&xchg->list_xchg_entry,
&free_pool->list_free_xchg_list);
free_pool->total_fcp_xchg++;
spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock,
flags);
page_addr += rsp_iu_size;
phy_addr += rsp_iu_size;
i++;
xchg++;
}
}
free_pool->fcp_xchg_sum = free_pool->total_fcp_xchg;
return RETURN_OK;
free_buff:
unf_free_all_rsp_pages(v_xchg_mgr);
return UNF_RETURN_ERROR;
}
static unsigned int unf_get_xchg_config_sum(struct unf_lport_s *v_lport,
unsigned int *v_xchg_sum)
{
struct unf_lport_cfg_item_s *lport_cfg_items = NULL;
lport_cfg_items = &v_lport->low_level_func.lport_cfg_items;
/* Already validated by the lower layer;
 * no need to check again here.
 */
*v_xchg_sum = lport_cfg_items->max_sfs_xchg + lport_cfg_items->max_io;
if ((*v_xchg_sum / UNF_EXCHG_MGR_NUM) == 0 ||
lport_cfg_items->max_sfs_xchg / UNF_EXCHG_MGR_NUM == 0) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Port(0x%x) Xchgsum(%u) or SfsXchg(%u) is less than ExchangeMgrNum(%u).",
v_lport->port_id, *v_xchg_sum,
lport_cfg_items->max_sfs_xchg,
UNF_EXCHG_MGR_NUM);
return UNF_RETURN_ERROR;
}
if (*v_xchg_sum > (INVALID_VALUE16 - 1)) {
/* Sums beyond the 16-bit ox_id/rx_id format
 * are not supported.
 */
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) Exchange num(0x%x) is Too Big.",
v_lport->port_id, *v_xchg_sum);
return UNF_RETURN_ERROR;
}
return RETURN_OK;
}
static void unf_xchg_cancel_timer(void *v_xchg)
{
struct unf_xchg_s *xchg = NULL;
int need_dec_xchg_ref = UNF_FALSE;
unsigned long flag = 0;
UNF_CHECK_VALID(0x874, UNF_TRUE, v_xchg, return);
xchg = (struct unf_xchg_s *)v_xchg;
spin_lock_irqsave(&xchg->xchg_state_lock, flag);
if (cancel_delayed_work(&xchg->timeout_work))
need_dec_xchg_ref = UNF_TRUE;
spin_unlock_irqrestore(&xchg->xchg_state_lock, flag);
if (need_dec_xchg_ref == UNF_TRUE)
unf_xchg_ref_dec(v_xchg, XCHG_CANCEL_TIMER);
}
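/* The reference dropped here is presumably the one taken when the
 * delayed work was armed; if cancel_delayed_work() fails, the work is
 * already running and its handler is expected to drop that reference
 * itself, hence no unf_xchg_ref_dec() on that path.
 */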
void unf_show_all_xchg(struct unf_lport_s *v_lport,
struct unf_xchg_mgr_s *v_xchg_mgr)
{
struct unf_lport_s *lport = NULL;
struct unf_xchg_mgr_s *xchg_mgr = NULL;
struct unf_xchg_s *xchg = NULL;
struct list_head *xchg_node = NULL;
struct list_head *next_xchg_node = NULL;
unsigned long flags = 0;
UNF_CHECK_VALID(0x879, UNF_TRUE, v_lport, return);
UNF_CHECK_VALID(0x880, UNF_TRUE, v_xchg_mgr, return);
UNF_REFERNCE_VAR(lport);
UNF_REFERNCE_VAR(xchg);
xchg_mgr = v_xchg_mgr;
lport = v_lport;
/* hot Xchg */
spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hot_pool_lock, flags);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN,
"INI busy :");
list_for_each_safe(xchg_node, next_xchg_node,
&xchg_mgr->hot_pool->ini_busylist) {
xchg = list_entry(xchg_node, struct unf_xchg_s,
list_xchg_entry);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR,
"0x%p---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.",
xchg,
(unsigned int)xchg->hot_pool_tag,
(unsigned int)xchg->xchg_type,
(unsigned int)xchg->ox_id,
(unsigned int)xchg->rx_id,
(unsigned int)xchg->sid,
(unsigned int)xchg->did,
atomic_read(&xchg->ref_cnt),
(unsigned int)xchg->io_state,
xchg->alloc_jif);
}
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL,
UNF_WARN, "SFS :");
list_for_each_safe(xchg_node, next_xchg_node,
&xchg_mgr->hot_pool->sfs_busylist) {
xchg = list_entry(xchg_node, struct unf_xchg_s,
list_xchg_entry);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN,
"0x%p---0x%x---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.",
xchg,
xchg->cmnd_code,
(unsigned int)xchg->hot_pool_tag,
(unsigned int)xchg->xchg_type,
(unsigned int)xchg->ox_id,
(unsigned int)xchg->rx_id,
(unsigned int)xchg->sid,
(unsigned int)xchg->did,
atomic_read(&xchg->ref_cnt),
(unsigned int)xchg->io_state,
xchg->alloc_jif);
}
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN,
"Destroy list.");
list_for_each_safe(xchg_node, next_xchg_node,
&xchg_mgr->hot_pool->list_destroy_xchg) {
xchg = list_entry(xchg_node, struct unf_xchg_s,
list_xchg_entry);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN,
"0x%p---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.",
xchg,
(unsigned int)xchg->hot_pool_tag,
(unsigned int)xchg->xchg_type,
(unsigned int)xchg->ox_id,
(unsigned int)xchg->rx_id,
(unsigned int)xchg->sid,
(unsigned int)xchg->did,
atomic_read(&xchg->ref_cnt),
(unsigned int)xchg->io_state,
xchg->alloc_jif);
}
spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hot_pool_lock, flags);
UNF_REFERNCE_VAR(xchg);
UNF_REFERNCE_VAR(lport);
}
static void unf_delay_work_del_syn(struct unf_xchg_s *v_xchg)
{
struct unf_xchg_s *xchg = NULL;
UNF_CHECK_VALID(0x884, UNF_TRUE, v_xchg, return);
xchg = v_xchg;
/* synchronous release timer */
if (!cancel_delayed_work_sync(&xchg->timeout_work)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"Exchange(0x%p), State(0x%x) can't delete work timer, timer is running or no timer.",
xchg, xchg->io_state);
} else {
/* The reference count must not be decremented directly;
 * this prevents the XCHG from being moved to the free
 * list while the card is being unloaded.
 */
unf_cm_free_xchg(xchg->lport, xchg);
}
}
static void unf_free_lport_sfs_xchg(struct unf_xchg_mgr_s *v_xchg_mgr,
int v_done_ini_flag)
{
struct list_head *list = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned long hot_pool_lock_flags = 0;
UNF_REFERNCE_VAR(v_done_ini_flag);
UNF_CHECK_VALID(0x887, UNF_TRUE, v_xchg_mgr, return);
UNF_CHECK_VALID(0x888, UNF_TRUE, v_xchg_mgr->hot_pool, return);
spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
while (!list_empty(&v_xchg_mgr->hot_pool->sfs_busylist)) {
list = (&v_xchg_mgr->hot_pool->sfs_busylist)->next;
list_del_init(list);
/* Move the SFS xchg onto the destroy list first so it
 * cannot be processed twice.
 */
list_add_tail(list, &v_xchg_mgr->hot_pool->list_destroy_xchg);
xchg = list_entry(list, struct unf_xchg_s, list_xchg_entry);
spin_unlock_irqrestore(
&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
unf_delay_work_del_syn(xchg);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"Free SFS Exchange(0x%p), State(0x%x), Reference count(%d), Start time(%llu).",
xchg, xchg->io_state, atomic_read(&xchg->ref_cnt),
xchg->alloc_jif);
unf_cm_free_xchg(xchg->lport, xchg);
spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
}
spin_unlock_irqrestore(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
}
static void unf_free_lport_destroy_xchg(struct unf_xchg_mgr_s *v_xchg_mgr)
{
#define UNF_WAIT_DESTROY_EMPTY_STEP_MS 1000
#define UNF_WAIT_IO_STATE_TGT_FRONT_MS (10 * 1000)
struct unf_xchg_s *xchg = NULL;
struct list_head *next_xchg_node = NULL;
unsigned long hot_pool_lock_flags = 0;
unsigned long xchg_flag = 0;
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_xchg_mgr, return);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_xchg_mgr->hot_pool,
return);
/* Timers of exchanges on the destroy list are cancelled here;
 * all that remains is to verify, on the target side, that each
 * timer has actually been released.
 */
spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
while (!list_empty(&v_xchg_mgr->hot_pool->list_destroy_xchg)) {
next_xchg_node =
(&v_xchg_mgr->hot_pool->list_destroy_xchg)->next;
xchg = list_entry(next_xchg_node, struct unf_xchg_s,
list_xchg_entry);
spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"Free Exchange(0x%p), Type(0x%x), State(0x%x), Reference count(%d), Start time(%llu)",
xchg, xchg->xchg_type, xchg->io_state,
atomic_read(&xchg->ref_cnt),
xchg->alloc_jif);
spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag);
spin_unlock_irqrestore(
&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
/* This call guarantees that the timer is either cancelled
 * or, if already running, waited on until it completes.
 */
unf_delay_work_del_syn(xchg);
/*
 * If the timer was cancelled, free the Xchg now.
 * If the timer already fired, the Xchg may have been
 * released, in which case this free simply fails.
 */
unf_cm_free_xchg(xchg->lport, xchg);
spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
}
spin_unlock_irqrestore(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
}
static unsigned int unf_free_lport_xchg(struct unf_lport_s *v_lport,
struct unf_xchg_mgr_s *v_xchg_mgr)
{
#define UNF_OS_WAITIO_TIMEOUT (10 * 1000)
unsigned long free_pool_lock_flags = 0;
int wait = UNF_FALSE;
unsigned int total_xchg = 0;
unsigned int total_xchg_sum = 0;
unsigned int ret = RETURN_OK;
unsigned long long timeout = 0;
struct completion xchg_mgr_completion =
COMPLETION_INITIALIZER(xchg_mgr_completion);
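/* On-stack completion: when busy exchanges remain, it is published via
 * free_pool.xchg_mgr_completion and presumably completed by the release
 * path once the last exchange returns to the free pool, so this
 * function can sleep below instead of polling.
 */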
UNF_CHECK_VALID(0x881, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x882, UNF_TRUE, v_xchg_mgr, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x883, UNF_TRUE, v_xchg_mgr->hot_pool,
return UNF_RETURN_ERROR);
UNF_REFERNCE_VAR(v_lport);
unf_free_lport_sfs_xchg(v_xchg_mgr, UNF_FALSE);
/* free INI Mode exchanges belong to L_Port */
unf_free_lport_ini_xchg(v_xchg_mgr, UNF_FALSE);
spin_lock_irqsave(&v_xchg_mgr->free_pool.xchg_free_pool_lock,
free_pool_lock_flags);
total_xchg = v_xchg_mgr->free_pool.total_fcp_xchg +
v_xchg_mgr->free_pool.total_sfs_xchg;
total_xchg_sum = v_xchg_mgr->free_pool.fcp_xchg_sum +
v_xchg_mgr->free_pool.sfs_xchg_sum;
if (total_xchg != total_xchg_sum) {
v_xchg_mgr->free_pool.xchg_mgr_completion =
&xchg_mgr_completion;
wait = UNF_TRUE;
}
spin_unlock_irqrestore(&v_xchg_mgr->free_pool.xchg_free_pool_lock,
free_pool_lock_flags);
if (wait == UNF_TRUE) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) begin to wait for exchange manager completion(%ld) (0x%x:0x%x)",
v_lport->port_id, jiffies, total_xchg,
total_xchg_sum);
unf_show_all_xchg(v_lport, v_xchg_mgr);
timeout = wait_for_completion_timeout(
v_xchg_mgr->free_pool.xchg_mgr_completion,
msecs_to_jiffies(UNF_OS_WAITIO_TIMEOUT));
if (timeout == 0)
unf_free_lport_destroy_xchg(v_xchg_mgr);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) wait for exchange manager completion end",
v_lport->port_id);
spin_lock_irqsave(&v_xchg_mgr->free_pool.xchg_free_pool_lock,
free_pool_lock_flags);
v_xchg_mgr->free_pool.xchg_mgr_completion = NULL;
spin_unlock_irqrestore(
&v_xchg_mgr->free_pool.xchg_free_pool_lock,
free_pool_lock_flags);
}
return ret;
}
void unf_free_lport_all_xchg(struct unf_lport_s *v_lport)
{
struct unf_xchg_mgr_s *xchg_mgr;
unsigned int i;
UNF_CHECK_VALID(0x881, UNF_TRUE, v_lport, return);
UNF_REFERNCE_VAR(v_lport);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
xchg_mgr = unf_get_xchg_mgr_by_lport(v_lport, i);
if (unlikely(!xchg_mgr)) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%x) hot pool is NULL",
v_lport->port_id);
continue;
}
unf_free_lport_sfs_xchg(xchg_mgr, UNF_FALSE);
/* free INI Mode exchanges belong to L_Port */
unf_free_lport_ini_xchg(xchg_mgr, UNF_FALSE);
unf_free_lport_destroy_xchg(xchg_mgr);
}
}
void unf_free_lport_ini_xchg(struct unf_xchg_mgr_s *v_xchg_mgr,
int v_done_ini_flag)
{
/*
* 1. L_Port destroy
* 2. AC power down
*/
struct list_head *list = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned long hot_pool_lock_flags = 0;
unsigned int up_status = 0;
UNF_REFERNCE_VAR(v_done_ini_flag);
UNF_CHECK_VALID(0x889, UNF_TRUE, v_xchg_mgr, return);
UNF_CHECK_VALID(0x890, UNF_TRUE, v_xchg_mgr->hot_pool, return);
spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
while (!list_empty(&v_xchg_mgr->hot_pool->ini_busylist)) {
/* for each INI busy_list (exchange) node */
list = (&v_xchg_mgr->hot_pool->ini_busylist)->next;
/* Move exchange node to destroy_list to prevent repeated completion */
list_del_init(list);
list_add_tail(list, &v_xchg_mgr->hot_pool->list_destroy_xchg);
xchg = list_entry(list, struct unf_xchg_s, list_xchg_entry);
if (atomic_read(&xchg->ref_cnt) <= 0)
continue;
spin_unlock_irqrestore(
&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
unf_delay_work_del_syn(xchg);
/* When completing on the INI side here, fail the command so
 * that a spurious OK cannot cause data inconsistency.
 */
up_status = unf_get_uplevel_cmnd_errcode(
xchg->scsi_cmnd_info.err_code_table,
xchg->scsi_cmnd_info.err_code_table_cout,
UNF_IO_PORT_LOGOUT);
if (xchg->io_state & INI_IO_STATE_UPABORT) {
/*
* About L_Port destroy or AC power down:
* UP_ABORT ---to--->>> ABORT_Port_Removing
*/
up_status = UNF_IO_ABORT_PORT_REMOVING;
}
xchg->scsi_cmnd_info.result = up_status;
up(&xchg->task_sema);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Free INI exchange(0x%p) state(0x%x) reference count(%d) start time(%llu)",
xchg, xchg->io_state, atomic_read(&xchg->ref_cnt),
xchg->alloc_jif);
unf_cm_free_xchg(xchg->lport, xchg);
/* go to next INI busy_list (exchange) node */
spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
}
spin_unlock_irqrestore(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
}
static void unf_free_all_big_sfs(struct unf_xchg_mgr_s *v_xchg_mgr)
{
struct unf_xchg_mgr_s *xchg_mgr = v_xchg_mgr;
struct unf_big_sfs_s *big_sfs = NULL;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
unsigned long flag = 0;
unsigned int buff_index;
UNF_CHECK_VALID(0x891, UNF_TRUE, xchg_mgr, return);
/* Release the free resources in the busy state */
spin_lock_irqsave(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, flag);
list_for_each_safe(node, next_node,
&xchg_mgr->st_big_sfs_pool.list_busy_pool) {
list_del(node);
list_add_tail(node, &xchg_mgr->st_big_sfs_pool.list_free_pool);
}
list_for_each_safe(node, next_node,
&xchg_mgr->st_big_sfs_pool.list_free_pool) {
list_del(node);
big_sfs = list_entry(node, struct unf_big_sfs_s,
entry_big_sfs);
if (big_sfs->vaddr)
big_sfs->vaddr = NULL;
}
spin_unlock_irqrestore(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock,
flag);
if (xchg_mgr->big_sfs_buf_list.buflist) {
for (buff_index = 0;
buff_index < xchg_mgr->big_sfs_buf_list.buf_num;
buff_index++) {
if (xchg_mgr->big_sfs_buf_list.buflist[buff_index].vaddr) {
kfree(xchg_mgr->big_sfs_buf_list.buflist[buff_index].vaddr);
xchg_mgr->big_sfs_buf_list.buflist[buff_index].vaddr = NULL;
}
}
kfree(xchg_mgr->big_sfs_buf_list.buflist);
xchg_mgr->big_sfs_buf_list.buflist = NULL;
}
}
static void unf_free_big_sfs_pool(struct unf_xchg_mgr_s *v_xchg_mgr)
{
UNF_CHECK_VALID(0x892, UNF_TRUE, v_xchg_mgr, return);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
"Free Big SFS Pool, Count(0x%x).",
v_xchg_mgr->st_big_sfs_pool.free_count);
unf_free_all_big_sfs(v_xchg_mgr);
v_xchg_mgr->st_big_sfs_pool.free_count = 0;
if (v_xchg_mgr->st_big_sfs_pool.big_sfs_pool) {
vfree(v_xchg_mgr->st_big_sfs_pool.big_sfs_pool);
v_xchg_mgr->st_big_sfs_pool.big_sfs_pool = NULL;
}
}
static void unf_free_xchg_mgr_mem(struct unf_lport_s *v_lport,
struct unf_xchg_mgr_s *v_xchg_mgr)
{
struct unf_xchg_mgr_s *xchg_mgr = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned int i = 0;
unsigned int xchg_sum = 0;
struct unf_xchg_free_pool_s *free_pool = NULL;
UNF_CHECK_VALID(0x893, UNF_TRUE, v_xchg_mgr, return);
xchg_mgr = v_xchg_mgr;
/* Release the reserved Rsp IU Page */
unf_free_all_rsp_pages(xchg_mgr);
unf_free_big_sfs_pool(xchg_mgr);
/* Release the SFS area first; it was historically allocated
 * with get_free_pages(), which is why sfs_mm_start is compared
 * against 0 rather than NULL.
 */
if (xchg_mgr->sfs_mm_start != 0) {
dma_free_coherent(&v_lport->low_level_func.dev->dev,
xchg_mgr->sfs_mem_size,
xchg_mgr->sfs_mm_start,
xchg_mgr->sfs_phy_addr);
xchg_mgr->sfs_mm_start = 0;
}
/* Release Xchg first */
if (xchg_mgr->fcp_mm_start) {
unf_get_xchg_config_sum(v_lport, &xchg_sum);
xchg_sum = xchg_sum / UNF_EXCHG_MGR_NUM;
xchg = xchg_mgr->fcp_mm_start;
for (i = 0; i < xchg_sum; i++) {
if (!xchg)
break;
xchg++;
}
vfree(xchg_mgr->fcp_mm_start);
xchg_mgr->fcp_mm_start = NULL;
}
/* release the hot pool */
if (xchg_mgr->hot_pool) {
vfree(xchg_mgr->hot_pool);
xchg_mgr->hot_pool = NULL;
}
free_pool = &xchg_mgr->free_pool;
vfree(xchg_mgr);
UNF_REFERNCE_VAR(xchg_mgr);
UNF_REFERNCE_VAR(free_pool);
}
static void unf_free_xchg_mgr(struct unf_lport_s *v_lport,
struct unf_xchg_mgr_s *v_xchg_mgr)
{
unsigned long flags = 0;
unsigned int ret = UNF_RETURN_ERROR;
UNF_CHECK_VALID(0x894, UNF_TRUE, v_lport, return);
UNF_CHECK_VALID(0x895, UNF_TRUE, v_xchg_mgr, return);
/* 1. At first, free exchanges for this Exch_Mgr */
ret = unf_free_lport_xchg(v_lport, v_xchg_mgr);
/* 2. Delete this Exch_Mgr entry */
spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags);
list_del_init(&v_xchg_mgr->xchg_mgr_entry);
spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags);
/* 3. free Exch_Mgr memory if necessary */
if (ret == RETURN_OK) {
/* free memory directly */
unf_free_xchg_mgr_mem(v_lport, v_xchg_mgr);
} else {
/* Add it to Dirty list */
spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags);
list_add_tail(&v_xchg_mgr->xchg_mgr_entry,
&v_lport->list_dirty_xchg_mgr_head);
spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags);
/* Mark dirty flag */
unf_cmmark_dirty_mem(v_lport,
UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY);
}
}
void unf_free_all_xchg_mgr(struct unf_lport_s *v_lport)
{
struct unf_xchg_mgr_s *xchg_mgr = NULL;
unsigned long flags = 0;
unsigned int i = 0;
UNF_CHECK_VALID(0x896, UNF_TRUE, v_lport, return);
/* for each L_Port->Exch_Mgr_List */
spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags);
while (!list_empty(&v_lport->list_xchg_mgr_head)) {
spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags);
xchg_mgr = unf_get_xchg_mgr_by_lport(v_lport, i);
unf_free_xchg_mgr(v_lport, xchg_mgr);
if (i < UNF_EXCHG_MGR_NUM)
v_lport->p_xchg_mgr[i] = NULL;
i++;
/* go to next */
spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags);
}
spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags);
v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_4_DESTROY_EXCH_MGR;
}
static unsigned int unf_init_xchg_mgr(struct unf_xchg_mgr_s *v_xchg_mgr)
{
struct unf_xchg_mgr_s *xchg_mgr = NULL;
UNF_CHECK_VALID(0x897, UNF_TRUE, v_xchg_mgr, return UNF_RETURN_ERROR);
xchg_mgr = v_xchg_mgr;
memset(xchg_mgr, 0, sizeof(struct unf_xchg_mgr_s));
INIT_LIST_HEAD(&xchg_mgr->xchg_mgr_entry);
xchg_mgr->mgr_type = UNF_XCHG_MGR_FC;
xchg_mgr->min_xid = UNF_XCHG_MIN_XID;
xchg_mgr->max_xid = UNF_XCHG_MAX_XID;
xchg_mgr->fcp_mm_start = NULL;
xchg_mgr->mem_size = sizeof(struct unf_xchg_mgr_s);
return RETURN_OK;
}
static unsigned int unf_init_xchg_mgr_free_pool(
struct unf_xchg_mgr_s *v_xchg_mgr)
{
struct unf_xchg_free_pool_s *free_pool = NULL;
struct unf_xchg_mgr_s *xchg_mgr = NULL;
UNF_CHECK_VALID(0x898, UNF_TRUE, v_xchg_mgr, return UNF_RETURN_ERROR);
xchg_mgr = v_xchg_mgr;
free_pool = &xchg_mgr->free_pool;
INIT_LIST_HEAD(&free_pool->list_free_xchg_list);
INIT_LIST_HEAD(&free_pool->list_sfs_xchg_list);
spin_lock_init(&free_pool->xchg_free_pool_lock);
free_pool->fcp_xchg_sum = 0;
free_pool->xchg_mgr_completion = NULL;
return RETURN_OK;
}
static unsigned int unf_init_xchg_hot_pool(
struct unf_lport_s *v_lport,
struct unf_xchg_hot_pool_s *v_hot_pool,
unsigned int v_xchg_sum)
{
struct unf_xchg_hot_pool_s *hot_pool = NULL;
UNF_CHECK_VALID(0x899, UNF_TRUE, v_hot_pool, return UNF_RETURN_ERROR);
hot_pool = v_hot_pool;
INIT_LIST_HEAD(&hot_pool->sfs_busylist);
INIT_LIST_HEAD(&hot_pool->ini_busylist);
spin_lock_init(&hot_pool->xchg_hot_pool_lock);
INIT_LIST_HEAD(&hot_pool->list_destroy_xchg);
hot_pool->total_xchges = 0;
hot_pool->total_res_cnt = 0;
hot_pool->wait_state = UNF_FALSE;
hot_pool->lport = v_lport;
/* Slab Pool Index */
hot_pool->slab_next_index = 0;
UNF_TOU16_CHECK(hot_pool->slab_total_sum, v_xchg_sum,
return UNF_RETURN_ERROR);
return RETURN_OK;
}
static unsigned int unf_alloc_and_init_big_sfs_pool(
struct unf_lport_s *v_lport,
struct unf_xchg_mgr_s *v_xchg_mgr)
{
unsigned int i = 0;
unsigned int size = 0;
unsigned int align_size = 0;
unsigned int npiv_cnt = 0;
struct unf_big_sfs_pool_s *big_sfs_pool = NULL;
struct unf_big_sfs_s *big_sfs_buf = NULL;
unsigned int buf_total_size;
unsigned int buf_num;
unsigned int buf_cnt_perhugebuf;
unsigned int alloc_idx;
unsigned int curbuf_idx = 0;
unsigned int curbuf_offset = 0;
UNF_CHECK_VALID(0x900, UNF_TRUE, v_xchg_mgr, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x901, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
big_sfs_pool = &v_xchg_mgr->st_big_sfs_pool;
INIT_LIST_HEAD(&big_sfs_pool->list_free_pool);
INIT_LIST_HEAD(&big_sfs_pool->list_busy_pool);
spin_lock_init(&big_sfs_pool->big_sfs_pool_lock);
npiv_cnt = v_lport->low_level_func.support_max_npiv_num;
/*
 * The factor of 6 covers GID_PT/GID_FT, RSCN and ECHO, allowing
 * for a new command to arrive while a response is still
 * outstanding. A further 20 buffers are reserved for RSCN:
 * testing showed RSCN bursts exhausting the pool and making
 * discovery fail.
 */
big_sfs_pool->free_count = (npiv_cnt + 1) * 6 + 20;
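/* e.g. a port with no NPIV (npiv_cnt == 0) reserves
 * (0 + 1) * 6 + 20 = 26 big SFS buffers
 */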
big_sfs_buf = (struct unf_big_sfs_s *)vmalloc(
big_sfs_pool->free_count
* sizeof(struct unf_big_sfs_s));
if (!big_sfs_buf) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"Allocate Big SFS buf fail.");
return UNF_RETURN_ERROR;
}
memset(big_sfs_buf, 0, big_sfs_pool->free_count *
sizeof(struct unf_big_sfs_s));
v_xchg_mgr->mem_size +=
(unsigned int)
(big_sfs_pool->free_count * sizeof(struct unf_big_sfs_s));
big_sfs_pool->big_sfs_pool = (void *)big_sfs_buf;
/*
 * Use the larger of sizeof(struct unf_gif_acc_pld_s) and
 * sizeof(struct unf_rscn_pld_s); the former is known to be
 * larger, so it is assigned directly instead of compared,
 * which also avoids the icp error.
 */
size = sizeof(struct unf_gif_acc_pld_s);
align_size = ALIGN(size, PAGE_SIZE);
buf_total_size = align_size * big_sfs_pool->free_count;
v_xchg_mgr->big_sfs_buf_list.buf_size =
buf_total_size > BUF_LIST_PAGE_SIZE ?
BUF_LIST_PAGE_SIZE : buf_total_size;
buf_cnt_perhugebuf =
v_xchg_mgr->big_sfs_buf_list.buf_size / align_size;
buf_num =
big_sfs_pool->free_count % buf_cnt_perhugebuf ?
big_sfs_pool->free_count / buf_cnt_perhugebuf + 1 :
big_sfs_pool->free_count / buf_cnt_perhugebuf;
v_xchg_mgr->big_sfs_buf_list.buflist =
(struct buff_list_s *)kmalloc(
buf_num * sizeof(struct buff_list_s),
GFP_KERNEL);
v_xchg_mgr->big_sfs_buf_list.buf_num = buf_num;
if (!v_xchg_mgr->big_sfs_buf_list.buflist) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[err]Allocate BigSfs pool buf list failed out of memory");
goto free_buff;
}
memset(v_xchg_mgr->big_sfs_buf_list.buflist, 0, buf_num *
sizeof(struct buff_list_s));
for (alloc_idx = 0; alloc_idx < buf_num; alloc_idx++) {
v_xchg_mgr->big_sfs_buf_list.buflist[alloc_idx].vaddr =
kmalloc(v_xchg_mgr->big_sfs_buf_list.buf_size,
GFP_ATOMIC);
if (!v_xchg_mgr->big_sfs_buf_list.buflist[alloc_idx].vaddr)
goto free_buff;
memset(v_xchg_mgr->big_sfs_buf_list.buflist[alloc_idx].vaddr,
0, v_xchg_mgr->big_sfs_buf_list.buf_size);
}
for (i = 0; i < big_sfs_pool->free_count; i++) {
if ((i != 0) && !(i % buf_cnt_perhugebuf))
curbuf_idx++;
curbuf_offset = align_size * (i % buf_cnt_perhugebuf);
big_sfs_buf->vaddr =
v_xchg_mgr->big_sfs_buf_list.buflist[curbuf_idx].vaddr +
curbuf_offset;
big_sfs_buf->size = size;
v_xchg_mgr->mem_size += size;
list_add_tail(&big_sfs_buf->entry_big_sfs,
&big_sfs_pool->list_free_pool);
big_sfs_buf++;
}
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[EVENT]Allocate BigSfs pool size:%d,uiAlignSize:%d,buf_num:%d,buf_size:%d",
size, align_size, v_xchg_mgr->big_sfs_buf_list.buf_num,
v_xchg_mgr->big_sfs_buf_list.buf_size);
return RETURN_OK;
free_buff:
unf_free_all_big_sfs(v_xchg_mgr);
vfree(big_sfs_buf);
big_sfs_pool->big_sfs_pool = NULL;
return UNF_RETURN_ERROR;
}
/*
* Function Name : unf_free_one_big_sfs
* Function Description: Put the big sfs memory in xchg back to bigsfspool
* Input Parameters : struct unf_xchg_s * v_xchg
* Output Parameters : N/A
* Return Type : static void
*/
static void unf_free_one_big_sfs(struct unf_xchg_s *v_xchg)
{
unsigned long flag = 0;
struct unf_xchg_mgr_s *xchg_mgr = NULL;
UNF_CHECK_VALID(0x902, UNF_TRUE, v_xchg, return);
xchg_mgr = v_xchg->xchg_mgr;
UNF_CHECK_VALID(0x903, UNF_TRUE, xchg_mgr, return);
if (!v_xchg->big_sfs_buf)
return;
if ((v_xchg->cmnd_code != NS_GID_PT) &&
(v_xchg->cmnd_code != NS_GID_FT) &&
(v_xchg->cmnd_code != ELS_ECHO) &&
(UNF_SET_ELS_ACC_TYPE(ELS_ECHO) != v_xchg->cmnd_code) &&
(v_xchg->cmnd_code != ELS_RSCN) &&
(UNF_SET_ELS_ACC_TYPE(ELS_RSCN) != v_xchg->cmnd_code)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT,
UNF_MAJOR,
"Exchange(0x%p), Command(0x%x) big SFS buf is not NULL.",
v_xchg, v_xchg->cmnd_code);
}
spin_lock_irqsave(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, flag);
list_del(&v_xchg->big_sfs_buf->entry_big_sfs);
list_add_tail(&v_xchg->big_sfs_buf->entry_big_sfs,
&xchg_mgr->st_big_sfs_pool.list_free_pool);
xchg_mgr->st_big_sfs_pool.free_count++;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
"Free one big SFS buf(0x%p), Count(0x%x), Exchange(0x%p), Command(0x%x).",
v_xchg->big_sfs_buf->vaddr,
xchg_mgr->st_big_sfs_pool.free_count,
v_xchg, v_xchg->cmnd_code);
spin_unlock_irqrestore(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock,
flag);
}
static void unf_free_exchg_mgr_info(struct unf_lport_s *v_lport)
{
unsigned int i;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
unsigned long flags = 0;
struct unf_xchg_mgr_s *xchg_mgr = NULL;
spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags);
list_for_each_safe(node, next_node, &v_lport->list_xchg_mgr_head) {
list_del(node);
xchg_mgr = list_entry(node, struct unf_xchg_mgr_s,
xchg_mgr_entry);
}
spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
xchg_mgr = v_lport->p_xchg_mgr[i];
if (xchg_mgr) {
unf_free_big_sfs_pool(xchg_mgr);
unf_free_all_rsp_pages(xchg_mgr);
if (xchg_mgr->sfs_mm_start) {
dma_free_coherent(
&v_lport->low_level_func.dev->dev,
xchg_mgr->sfs_mem_size,
xchg_mgr->sfs_mm_start,
xchg_mgr->sfs_phy_addr);
xchg_mgr->sfs_mm_start = 0;
}
if (xchg_mgr->fcp_mm_start) {
vfree(xchg_mgr->fcp_mm_start);
xchg_mgr->fcp_mm_start = NULL;
}
if (xchg_mgr->hot_pool) {
vfree(xchg_mgr->hot_pool);
xchg_mgr->hot_pool = NULL;
}
vfree(xchg_mgr);
v_lport->p_xchg_mgr[i] = NULL;
}
}
}
static unsigned int unf_alloc_and_init_xchg_mgr(struct unf_lport_s *v_lport)
{
struct unf_xchg_mgr_s *xchg_mgr = NULL;
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct unf_xchg_s *xchg_mem = NULL;
void *sfs_mm_start = 0;
dma_addr_t sfs_phy_addr = 0;
unsigned int xchg_sum = 0;
unsigned int sfs_xchg_sum = 0;
unsigned long flags = 0;
unsigned int order = 0;
unsigned int ret = UNF_RETURN_ERROR;
unsigned int slab_num = 0;
unsigned int i = 0;
UNF_REFERNCE_VAR(order);
/* SFS_EXCH + I/O_EXCH */
ret = unf_get_xchg_config_sum(v_lport, &xchg_sum);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"Port(0x%x) can't get Exchange.",
v_lport->port_id);
return UNF_RETURN_ERROR;
}
/* SFS Exchange Sum */
sfs_xchg_sum = v_lport->low_level_func.lport_cfg_items.max_sfs_xchg /
UNF_EXCHG_MGR_NUM;
xchg_sum = xchg_sum / UNF_EXCHG_MGR_NUM;
slab_num = v_lport->low_level_func.support_max_xid_range /
UNF_EXCHG_MGR_NUM;
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
/* Alloc Exchange Manager */
xchg_mgr = (struct unf_xchg_mgr_s *)
vmalloc(sizeof(struct unf_xchg_mgr_s));
if (!xchg_mgr) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR,
UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) allocate Exchange Manager Memory Fail.",
v_lport->port_id);
goto exit;
}
/* Init Exchange Manager */
ret = unf_init_xchg_mgr(xchg_mgr);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO,
UNF_LOG_REG_ATT, UNF_MAJOR,
"Port(0x%x) initialization Exchange Manager unsuccessful.",
v_lport->port_id);
goto free_xchg_mgr;
}
/* Initialize the Exchange Free Pool resource */
ret = unf_init_xchg_mgr_free_pool(xchg_mgr);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO,
UNF_LOG_REG_ATT, UNF_MAJOR,
"Port(0x%x) initialization Exchange Manager Free Pool unsuccessful.",
v_lport->port_id);
goto free_xchg_mgr;
}
/* Allocate memory for Hot Pool and Xchg slab */
hot_pool = vmalloc(sizeof(struct unf_xchg_hot_pool_s) +
sizeof(struct unf_xchg_s *) * slab_num);
if (!hot_pool) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR,
UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) allocate Hot Pool Memory Fail.",
v_lport->port_id);
goto free_xchg_mgr;
}
memset(hot_pool, 0,
sizeof(struct unf_xchg_hot_pool_s) +
sizeof(struct unf_xchg_s *) * slab_num);
xchg_mgr->mem_size +=
(unsigned int)(sizeof(struct unf_xchg_hot_pool_s) +
sizeof(struct unf_xchg_s *) * slab_num);
/* Initialize the Exchange Hot Pool resource */
ret = unf_init_xchg_hot_pool(v_lport, hot_pool, slab_num);
if (ret != RETURN_OK)
goto free_hot_pool;
hot_pool->base += (unsigned short)(i * slab_num);
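/* Each of the UNF_EXCHG_MGR_NUM managers thus owns a disjoint hot-pool
 * tag window of slab_num entries, so a hot-pool tag maps back to
 * exactly one manager.
 */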
/* Allocate the memory of all Xchg (IO/SFS) */
xchg_mem = vmalloc(sizeof(struct unf_xchg_s) * xchg_sum);
if (!xchg_mem) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR,
UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) allocate Exchange Memory Fail.",
v_lport->port_id);
goto free_hot_pool;
}
memset(xchg_mem, 0, sizeof(struct unf_xchg_s) * xchg_sum);
xchg_mgr->mem_size +=
(unsigned int)(sizeof(struct unf_xchg_s) * xchg_sum);
xchg_mgr->hot_pool = hot_pool;
xchg_mgr->fcp_mm_start = xchg_mem;
/* Allocate the memory used by the SFS Xchg
* to carry the ELS/BLS/GS command and response
*/
xchg_mgr->sfs_mem_size =
(unsigned int)(sizeof(union unf_sfs_u) * sfs_xchg_sum);
/* Allocate DMA space for sending SFS frames. Under a DMA32
 * mask the buffer stays below 4 GB, so no cross-4G issues
 * arise.
 */
order = (unsigned int)get_order(xchg_mgr->sfs_mem_size);
sfs_mm_start = dma_alloc_coherent(
&v_lport->low_level_func.dev->dev,
xchg_mgr->sfs_mem_size,
&sfs_phy_addr, GFP_KERNEL);
if (!sfs_mm_start) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR,
UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) Get Free Pagers Fail, Order(%u).",
v_lport->port_id, order);
goto free_xchg_mem;
}
memset(sfs_mm_start, 0, sizeof(union unf_sfs_u) * sfs_xchg_sum);
xchg_mgr->mem_size += xchg_mgr->sfs_mem_size;
xchg_mgr->sfs_mm_start = sfs_mm_start;
xchg_mgr->sfs_phy_addr = sfs_phy_addr;
/* The Xchg is initialized and mounted to the Free Pool */
ret = unf_init_xchg(v_lport, xchg_mgr, xchg_sum, sfs_xchg_sum);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO,
UNF_LOG_REG_ATT, UNF_MAJOR,
"Port(0x%x) initialization Exchange unsuccessful, Exchange Number(%u), SFS Exchange number(%u).",
v_lport->port_id, xchg_sum, sfs_xchg_sum);
dma_free_coherent(&v_lport->low_level_func.dev->dev,
xchg_mgr->sfs_mem_size,
xchg_mgr->sfs_mm_start,
xchg_mgr->sfs_phy_addr);
xchg_mgr->sfs_mm_start = 0;
goto free_xchg_mem;
}
/* Apply for the memory used by GID_PT, GID_FT, and RSCN */
ret = unf_alloc_and_init_big_sfs_pool(v_lport, xchg_mgr);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR,
UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) allocate big SFS fail",
v_lport->port_id);
unf_free_all_rsp_pages(xchg_mgr);
dma_free_coherent(&v_lport->low_level_func.dev->dev,
xchg_mgr->sfs_mem_size,
xchg_mgr->sfs_mm_start,
xchg_mgr->sfs_phy_addr);
xchg_mgr->sfs_mm_start = NULL;
goto free_xchg_mem;
}
spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags);
v_lport->p_xchg_mgr[i] = (void *)xchg_mgr;
list_add_tail(&xchg_mgr->xchg_mgr_entry,
&v_lport->list_xchg_mgr_head);
spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) ExchangeMgr:(0x%p),Base:(0x%x).",
v_lport->port_id, v_lport->p_xchg_mgr[i],
hot_pool->base);
}
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
"Port(0x%x) allocate Exchange Manager size(0x%x).",
v_lport->port_id, xchg_mgr->mem_size);
return RETURN_OK;
free_xchg_mem:
vfree(xchg_mem);
free_hot_pool:
vfree(hot_pool);
free_xchg_mgr:
vfree(xchg_mgr);
exit:
unf_free_exchg_mgr_info(v_lport);
return UNF_RETURN_ERROR;
}
void unf_xchg_mgr_destroy(struct unf_lport_s *v_lport)
{
UNF_CHECK_VALID(0x905, UNF_TRUE, v_lport, return);
unf_free_all_xchg_mgr(v_lport);
}
unsigned int unf_alloc_xchg_resource(struct unf_lport_s *v_lport)
{
UNF_CHECK_VALID(0x906, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
INIT_LIST_HEAD(&v_lport->list_dirty_xchg_mgr_head);
INIT_LIST_HEAD(&v_lport->list_xchg_mgr_head);
spin_lock_init(&v_lport->xchg_mgr_lock);
/* LPort Xchg Management Unit Allocation */
if (unf_alloc_and_init_xchg_mgr(v_lport) != RETURN_OK)
return UNF_RETURN_ERROR;
return RETURN_OK;
}
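/*
 * A "dirty" exchange manager still holds outstanding exchanges when
 * the L_Port is torn down, so it is parked on
 * list_dirty_xchg_mgr_head instead of being freed. This helper walks
 * that list, reports how many FCP/SFS exchanges are still held and,
 * unless v_show_only is set, unlinks each manager and releases its
 * memory.
 */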
void unf_destroy_dirty_xchg(struct unf_lport_s *v_lport, int v_show_only)
{
unsigned int dirty_xchg = 0;
struct unf_xchg_mgr_s *exch_mgr = NULL;
unsigned long flags = 0;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
UNF_CHECK_VALID(0x908, UNF_TRUE, v_lport, return);
if (v_lport->dirty_flag & UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY) {
spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags);
list_for_each_safe(node, next_node,
&v_lport->list_dirty_xchg_mgr_head) {
exch_mgr = list_entry(node, struct unf_xchg_mgr_s,
xchg_mgr_entry);
spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags);
if (exch_mgr) {
dirty_xchg =
(exch_mgr->free_pool.total_fcp_xchg +
exch_mgr->free_pool.total_sfs_xchg);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO,
UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) has %u dirty exchange(s)",
v_lport->port_id, dirty_xchg);
unf_show_all_xchg(v_lport, exch_mgr);
if (v_show_only == UNF_FALSE) {
/* Delete Dirty Exchange Mgr entry */
spin_lock_irqsave(
&v_lport->xchg_mgr_lock,
flags);
list_del_init(
&exch_mgr->xchg_mgr_entry);
spin_unlock_irqrestore(
&v_lport->xchg_mgr_lock,
flags);
/* Free Dirty Exchange Mgr memory */
unf_free_xchg_mgr_mem(v_lport,
exch_mgr);
}
}
spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags);
}
spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags);
}
UNF_REFERNCE_VAR(dirty_xchg);
}
struct unf_xchg_mgr_s *unf_get_xchg_mgr_by_lport(struct unf_lport_s *v_lport,
unsigned int v_idx)
{
struct unf_xchg_mgr_s *xchg_mgr = NULL;
unsigned long flags = 0;
UNF_CHECK_VALID(0x909, UNF_TRUE, v_lport, return NULL);
UNF_CHECK_VALID(0x910, UNF_TRUE, v_idx < UNF_EXCHG_MGR_NUM,
return NULL);
spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags);
xchg_mgr = v_lport->p_xchg_mgr[v_idx];
spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags);
return xchg_mgr;
}
struct unf_xchg_hot_pool_s *unf_get_hot_pool_by_lport(
struct unf_lport_s *v_lport,
unsigned int v_mgr_idx)
{
struct unf_xchg_mgr_s *xchg_mgr = NULL;
struct unf_lport_s *lport = NULL;
UNF_CHECK_VALID(0x910, UNF_TRUE, (v_lport), return NULL);
lport = (struct unf_lport_s *)(v_lport->root_lport);
UNF_CHECK_VALID(0x910, UNF_TRUE, (lport), return NULL);
/* Get Xchg Manager */
xchg_mgr = unf_get_xchg_mgr_by_lport(lport, v_mgr_idx);
if (!xchg_mgr) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"Port(0x%x) Exchange Manager is NULL.",
lport->port_id);
return NULL;
}
/* Get Xchg Manager Hot Pool */
return xchg_mgr->hot_pool;
}
static inline void unf_hot_pool_slab_set(
struct unf_xchg_hot_pool_s *v_hot_pool,
unsigned short v_slab_index,
struct unf_xchg_s *v_xchg)
{
UNF_CHECK_VALID(0x911, UNF_TRUE, v_hot_pool, return);
v_hot_pool->xchg_slab[v_slab_index] = v_xchg;
}
static inline struct unf_xchg_s *unf_get_xchg_by_xchg_tag(
struct unf_xchg_hot_pool_s *v_hot_pool,
unsigned short v_slab_index)
{
UNF_CHECK_VALID(0x912, UNF_TRUE, v_hot_pool, return NULL);
return v_hot_pool->xchg_slab[v_slab_index];
}
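/*
 * Map a hot pool tag back to its exchange. Each exchange manager owns
 * a contiguous tag window [base, base + slab_total_sum), so the owning
 * manager index is recovered arithmetically as
 * (tag * UNF_EXCHG_MGR_NUM) / support_max_xid_range, and the slab
 * lookup then uses (tag - base) within that manager.
 */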
static void *unf_lookup_xchg_by_tag(void *v_lport,
unsigned short v_hot_pool_tag)
{
struct unf_lport_s *lport = NULL;
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned long flags = 0;
unsigned int exchg_mgr_idx = 0;
struct unf_xchg_mgr_s *xchg_mgr = NULL;
UNF_CHECK_VALID(0x913, UNF_TRUE, v_lport, return NULL);
/* In the case of NPIV, v_lport is the Vport pointer;
 * it shares the ExchMgr of the RootLport
 */
lport = ((struct unf_lport_s *)v_lport)->root_lport;
UNF_CHECK_VALID(0x914, UNF_TRUE, lport, return NULL);
exchg_mgr_idx = (v_hot_pool_tag * UNF_EXCHG_MGR_NUM) /
lport->low_level_func.support_max_xid_range;
if (unlikely(exchg_mgr_idx >= UNF_EXCHG_MGR_NUM)) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%x) Get ExchgMgr %u err",
lport->port_id, exchg_mgr_idx);
return NULL;
}
xchg_mgr = lport->p_xchg_mgr[exchg_mgr_idx];
if (unlikely(!xchg_mgr)) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%x) ExchgMgr %u is null",
lport->port_id, exchg_mgr_idx);
return NULL;
}
hot_pool = xchg_mgr->hot_pool;
if (unlikely(!hot_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"Port(0x%x) Hot Pool is NULL.", lport->port_id);
return NULL;
}
if (unlikely(v_hot_pool_tag >=
(hot_pool->slab_total_sum + hot_pool->base))) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]LPort(0x%x) can't Input Tag(0x%x), Max(0x%x).",
lport->port_id, v_hot_pool_tag,
(hot_pool->slab_total_sum + hot_pool->base));
return NULL;
}
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags);
xchg = unf_get_xchg_by_xchg_tag(hot_pool,
v_hot_pool_tag - hot_pool->base);
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
return (void *)xchg;
}
static void *unf_find_xchg_by_oxid(void *v_lport, unsigned short v_oxid,
unsigned int v_oid)
{
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct unf_xchg_s *xchg = NULL;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
struct unf_lport_s *lport = NULL;
unsigned long flags = 0;
unsigned long xchg_flags = 0;
unsigned int i = 0;
UNF_CHECK_VALID(0x915, UNF_TRUE, (v_lport), return NULL);
/* In the case of NPIV, the v_lport is the Vport pointer,
* and the share uses the ExchMgr of the RootLport
*/
lport = ((struct unf_lport_s *)v_lport)->root_lport;
UNF_CHECK_VALID(0x916, UNF_TRUE, (lport), return NULL);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
hot_pool = unf_get_hot_pool_by_lport(lport, i);
if (unlikely(!hot_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_INFO,
UNF_LOG_IO_ATT, UNF_MAJOR,
"Port(0x%x) MgrIdex %u Hot Pool is NULL.",
lport->port_id, i);
continue;
}
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags);
/* 1. Traverse sfs_busy list */
list_for_each_safe(node, next_node, &hot_pool->sfs_busylist) {
xchg = list_entry(node, struct unf_xchg_s,
list_xchg_entry);
spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flags);
if (UNF_CHECK_OXID_MATCHED(v_oxid, v_oid, xchg)) {
atomic_inc(&xchg->ref_cnt);
spin_unlock_irqrestore(&xchg->xchg_state_lock,
xchg_flags);
spin_unlock_irqrestore(
&hot_pool->xchg_hot_pool_lock, flags);
return xchg;
}
spin_unlock_irqrestore(&xchg->xchg_state_lock,
xchg_flags);
}
/* 2. Traverse INI_Busy List */
list_for_each_safe(node, next_node, &hot_pool->ini_busylist) {
xchg = list_entry(node, struct unf_xchg_s,
list_xchg_entry);
spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flags);
if (UNF_CHECK_OXID_MATCHED(v_oxid, v_oid, xchg)) {
atomic_inc(&xchg->ref_cnt);
spin_unlock_irqrestore(&xchg->xchg_state_lock,
xchg_flags);
spin_unlock_irqrestore(
&hot_pool->xchg_hot_pool_lock, flags);
return xchg;
}
spin_unlock_irqrestore(&xchg->xchg_state_lock,
xchg_flags);
}
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
}
return NULL;
}
static inline int unf_check_xchg_matched(struct unf_xchg_s *xchg,
unsigned long long v_command_sn,
unsigned int v_world_id)
{
int matched = (v_command_sn == xchg->cmnd_sn);
if (matched && (atomic_read(&xchg->ref_cnt) > 0))
return UNF_TRUE;
else
return UNF_FALSE;
}
static void *unf_lookup_xchg_by_cmnd_sn(void *v_lport,
unsigned long long v_command_sn,
unsigned int v_world_id)
{
struct unf_lport_s *lport = NULL;
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned long flags = 0;
unsigned int i;
UNF_CHECK_VALID(0x919, UNF_TRUE, v_lport, return NULL);
/* In NPIV, v_lport is a Vport pointer, and idle resources are
* shared by ExchMgr of RootLport.
* However, busy resources are mounted on each vport.
* Therefore, vport needs to be used.
*/
lport = (struct unf_lport_s *)v_lport;
UNF_CHECK_VALID(0x920, UNF_TRUE, lport, return NULL);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
hot_pool = unf_get_hot_pool_by_lport(lport, i);
if (unlikely(!hot_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%x) hot pool is NULL",
lport->port_id);
continue;
}
/* from busy_list */
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags);
list_for_each_safe(node, next_node, &hot_pool->ini_busylist) {
xchg = list_entry(node, struct unf_xchg_s,
list_xchg_entry);
if (unf_check_xchg_matched(xchg, v_command_sn,
v_world_id)) {
spin_unlock_irqrestore(
&hot_pool->xchg_hot_pool_lock, flags);
return xchg;
}
}
/* vport: from destroy_list */
if (lport != lport->root_lport) {
list_for_each_safe(node, next_node,
&hot_pool->list_destroy_xchg) {
xchg = list_entry(node, struct unf_xchg_s,
list_xchg_entry);
if (unf_check_xchg_matched(xchg, v_command_sn,
v_world_id)) {
spin_unlock_irqrestore(
&hot_pool->xchg_hot_pool_lock,
flags);
UNF_TRACE(UNF_EVTLOG_IO_INFO,
UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]Port(0x%x) lookup exchange from destroy list",
lport->port_id);
return xchg;
}
}
}
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
}
return NULL;
}
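/*
 * Reserve a hot pool slab slot for the exchange: starting at
 * slab_next_index, probe the slab array circularly until a free slot
 * is found; wrapping back to the starting point means the pool is
 * exhausted. The exchange's hot_pool_tag is the slot index plus the
 * pool base.
 */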
static inline unsigned int unf_alloc_hot_pool_slab(
struct unf_xchg_hot_pool_s *v_hot_pool,
struct unf_xchg_s *v_xchg,
unsigned short v_rx_id)
{
unsigned short slab_index = 0;
UNF_CHECK_VALID(0x921, UNF_TRUE, v_hot_pool, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x922, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR);
/* Check whether the hot pool tag is within the SIRT range.
 * If it is, set up the management relationship;
 * otherwise handle it as a normal I/O.
 * If the SIRT bitmap is in use but the tag is already occupied,
 * the I/O is discarded.
 */
v_hot_pool->slab_next_index =
(unsigned short)v_hot_pool->slab_next_index;
slab_index = v_hot_pool->slab_next_index;
while (unf_get_xchg_by_xchg_tag(v_hot_pool, slab_index)) {
slab_index++;
slab_index = slab_index % v_hot_pool->slab_total_sum;
/* Rewind occurs */
if (slab_index == v_hot_pool->slab_next_index) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO,
UNF_LOG_EQUIP_ATT, UNF_MAJOR,
"There is No Slab At Hot Pool(0x%p) for xchg(0x%p).",
v_hot_pool, v_xchg);
return UNF_RETURN_ERROR;
}
}
unf_hot_pool_slab_set(v_hot_pool, slab_index, v_xchg);
v_xchg->hot_pool_tag = slab_index + v_hot_pool->base;
slab_index++;
v_hot_pool->slab_next_index =
slab_index % v_hot_pool->slab_total_sum;
return RETURN_OK;
}
struct unf_esgl_page_s *unf_get_one_free_esgl_page(struct unf_lport_s *v_lport,
struct unf_xchg_s *v_xchg)
{
struct unf_lport_s *lport = NULL;
struct unf_esgl_s *esgl = NULL;
struct unf_xchg_s *xchg = NULL;
struct list_head *list_head = NULL;
unsigned long flag = 0;
UNF_CHECK_VALID(0x923, UNF_TRUE, v_lport, return NULL);
UNF_CHECK_VALID(0x924, UNF_TRUE, v_xchg, return NULL);
lport = v_lport;
xchg = v_xchg;
/* Obtain a new Esgl from the EsglPool and
* add it to the list_esgls of the Xchg
*/
spin_lock_irqsave(&lport->esgl_pool.esgl_pool_lock, flag);
if (!list_empty(&lport->esgl_pool.list_esgl_pool)) {
list_head = (&lport->esgl_pool.list_esgl_pool)->next;
list_del(list_head);
lport->esgl_pool.esgl_pool_count--;
list_add_tail(list_head, &xchg->list_esgls);
esgl = list_entry(list_head, struct unf_esgl_s, entry_esgl);
atomic_inc(&xchg->esgl_cnt);
spin_unlock_irqrestore(&lport->esgl_pool.esgl_pool_lock, flag);
} else {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) esgl pool is empty",
lport->nport_id);
spin_unlock_irqrestore(&lport->esgl_pool.esgl_pool_lock, flag);
return NULL;
}
return &esgl->page;
}
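/*
 * Return every extended SGL page held by this exchange to the ESGL
 * pool of the root L_Port (under NPIV the pool is shared by all
 * vports).
 */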
void unf_release_esgls(struct unf_xchg_s *v_xchg)
{
struct unf_lport_s *lport = NULL;
struct list_head *list = NULL;
struct list_head *list_tmp = NULL;
unsigned long flag = 0;
UNF_CHECK_VALID(0x925, UNF_TRUE, v_xchg, return);
UNF_CHECK_VALID(0x926, UNF_TRUE, v_xchg->lport, return);
if (atomic_read(&v_xchg->esgl_cnt) <= 0)
return;
/* In the case of NPIV, the Vport pointer is saved in
 * v_xchg->lport, and the EsglPool of the RootLport is shared.
 */
lport = (v_xchg->lport)->root_lport;
UNF_CHECK_VALID(0x927, UNF_TRUE, (lport), return);
spin_lock_irqsave(&lport->esgl_pool.esgl_pool_lock, flag);
if (!list_empty(&v_xchg->list_esgls)) {
list_for_each_safe(list, list_tmp, &v_xchg->list_esgls) {
list_del(list);
list_add_tail(list, &lport->esgl_pool.list_esgl_pool);
lport->esgl_pool.esgl_pool_count++;
atomic_dec(&v_xchg->esgl_cnt);
}
}
spin_unlock_irqrestore(&lport->esgl_pool.esgl_pool_lock, flag);
}
static void unf_init_xchg_attribute(struct unf_xchg_s *v_xchg)
{
unsigned long flags = 0;
UNF_CHECK_VALID(0x973, UNF_TRUE, (v_xchg), return);
spin_lock_irqsave(&v_xchg->xchg_state_lock, flags);
v_xchg->xchg_mgr = NULL;
v_xchg->free_pool = NULL;
v_xchg->hot_pool = NULL;
v_xchg->lport = NULL;
v_xchg->rport = NULL;
v_xchg->disc_rport = NULL;
v_xchg->io_state = UNF_IO_STATE_NEW;
v_xchg->io_send_stage = TGT_IO_SEND_STAGE_NONE;
v_xchg->io_send_result = TGT_IO_SEND_RESULT_INVALID;
v_xchg->io_send_abort = UNF_FALSE;
v_xchg->io_abort_result = UNF_FALSE;
v_xchg->abts_state = 0;
v_xchg->ox_id = INVALID_VALUE16;
v_xchg->abort_oxid = INVALID_VALUE16;
v_xchg->rx_id = INVALID_VALUE16;
v_xchg->sid = INVALID_VALUE32;
v_xchg->did = INVALID_VALUE32;
v_xchg->oid = INVALID_VALUE32;
v_xchg->disc_port_id = INVALID_VALUE32;
v_xchg->seq_id = INVALID_VALUE8;
v_xchg->cmnd_code = INVALID_VALUE32;
v_xchg->cmnd_sn = INVALID_VALUE64;
v_xchg->data_len = 0;
v_xchg->resid_len = 0;
v_xchg->data_direction = DMA_NONE;
v_xchg->hot_pool_tag = INVALID_VALUE16;
v_xchg->big_sfs_buf = NULL;
v_xchg->may_consume_res_cnt = 0;
v_xchg->fact_consume_res_cnt = 0;
v_xchg->io_front_jif = INVALID_VALUE64;
v_xchg->ob_callback_sts = UNF_IO_SUCCESS;
v_xchg->start_jif = 0;
v_xchg->rport_bind_jifs = INVALID_VALUE64;
v_xchg->scsi_id = INVALID_VALUE32;
v_xchg->world_id = INVALID_VALUE32;
memset(&v_xchg->seq, 0, sizeof(struct unf_seq_s));
memset(&v_xchg->fcp_cmnd, 0, sizeof(struct unf_fcp_cmnd_s));
memset(&v_xchg->scsi_cmnd_info, 0, sizeof(struct unf_scsi_cmd_info_s));
memset(&v_xchg->abts_rsps, 0, sizeof(struct unf_abts_rsps_s));
memset(&v_xchg->dif_info, 0, sizeof(struct dif_info_s));
memset(v_xchg->private, 0,
(PKG_MAX_PRIVATE_DATA_SIZE * sizeof(unsigned int)));
v_xchg->echo_info.echo_result = UNF_ELS_ECHO_RESULT_OK;
v_xchg->echo_info.response_time = 0;
if (v_xchg->xchg_type == UNF_XCHG_TYPE_INI) {
if (v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu)
memset(v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu,
0, sizeof(struct unf_fcprsp_iu_s));
} else if (v_xchg->xchg_type == UNF_XCHG_TYPE_SFS) {
if (v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) {
memset(v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr,
0, sizeof(union unf_sfs_u));
v_xchg->fcp_sfs_union.sfs_entry.cur_offset = 0;
}
} else {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"Exchange Type(0x%x) SFS Union uninited.",
v_xchg->xchg_type);
}
v_xchg->xchg_type = UNF_XCHG_TYPE_INVALID;
v_xchg->pfn_ob_callback = NULL;
v_xchg->pfn_callback = NULL;
v_xchg->pfn_free_xchg = NULL;
atomic_set(&v_xchg->ref_cnt, 0);
atomic_set(&v_xchg->esgl_cnt, 0);
atomic_set(&v_xchg->delay_flag, 0);
if (delayed_work_pending(&v_xchg->timeout_work))
UNF_DEL_XCHG_TIMER_SAFE(v_xchg);
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags);
}
static void unf_add_back_to_fcp_list(
struct unf_xchg_free_pool_s *v_free_pool,
struct unf_xchg_s *v_xchg)
{
unsigned long flags = 0;
UNF_CHECK_VALID(0x928, UNF_TRUE, v_free_pool, return);
UNF_CHECK_VALID(0x929, UNF_TRUE, v_xchg, return);
unf_init_xchg_attribute(v_xchg);
/* The released I/O resources are added to
* the queue tail to facilitate fault locating
*/
spin_lock_irqsave(&v_free_pool->xchg_free_pool_lock, flags);
list_add_tail(&v_xchg->list_xchg_entry,
&v_free_pool->list_free_xchg_list);
v_free_pool->total_fcp_xchg++;
spin_unlock_irqrestore(&v_free_pool->xchg_free_pool_lock, flags);
}
static void unf_check_xchg_mgr_status(struct unf_xchg_mgr_s *v_xchg_mgr)
{
unsigned long flags = 0;
unsigned int total_xchg = 0;
unsigned int total_xchg_sum = 0;
UNF_CHECK_VALID(0x930, UNF_TRUE, v_xchg_mgr, return);
spin_lock_irqsave(&v_xchg_mgr->free_pool.xchg_free_pool_lock, flags);
total_xchg = v_xchg_mgr->free_pool.total_fcp_xchg +
v_xchg_mgr->free_pool.total_sfs_xchg;
total_xchg_sum = v_xchg_mgr->free_pool.fcp_xchg_sum +
v_xchg_mgr->free_pool.sfs_xchg_sum;
if ((v_xchg_mgr->free_pool.xchg_mgr_completion) &&
(total_xchg == total_xchg_sum)) {
complete(v_xchg_mgr->free_pool.xchg_mgr_completion);
}
spin_unlock_irqrestore(&v_xchg_mgr->free_pool.xchg_free_pool_lock,
flags);
}
static void unf_free_fcp_xchg(struct unf_xchg_s *v_xchg)
{
struct unf_xchg_free_pool_s *free_pool = NULL;
struct unf_xchg_mgr_s *xchg_mgr = NULL;
struct unf_lport_s *lport = NULL;
struct unf_rport_s *rport = NULL;
UNF_CHECK_VALID(0x932, UNF_TRUE, v_xchg, return);
/* Releasing a Specified INI I/O and Invoking the scsi_done Process */
unf_done_ini_xchg(v_xchg);
free_pool = v_xchg->free_pool;
xchg_mgr = v_xchg->xchg_mgr;
lport = v_xchg->lport;
rport = v_xchg->rport;
atomic_dec(&rport->pending_io_cnt);
/* Release the Esgls in the Xchg structure and
* return it to the EsglPool of the Lport
*/
unf_release_esgls(v_xchg);
/* Mount I/O resources to the FCP Free linked list */
unf_add_back_to_fcp_list(free_pool, v_xchg);
/* If the port is being removed, re-check the exchange manager
 * status so that a waiter blocked on the manager completion is
 * woken once every exchange is back in the free pool
 */
if (unlikely(lport->b_port_removing == UNF_TRUE))
unf_check_xchg_mgr_status(xchg_mgr);
}
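/*
 * Abort an INI command that has timed out:
 * 1. mark the exchange INI_IO_STATE_UPABORT (bail out if it is
 *    already set);
 * 2. arm the ABTS response timer and send the ABTS;
 * 3. wait up to 2 seconds on task_sema for the ucode abort marker;
 * 4. on a successful marker, complete the SCSI command with
 *    DID_BUS_BUSY so the midlayer retries it.
 * Every failure path cancels the timer and clears the UPABORT flag.
 */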
static void unf_fc_abort_timeout_cmnd(struct unf_lport_s *v_lport,
struct unf_xchg_s *v_xchg)
{
struct unf_lport_s *lport = v_lport;
struct unf_xchg_s *xchg = v_xchg;
struct unf_scsi_cmd_s scsi_cmnd = { 0 };
unsigned long flag = 0;
unsigned int timeout_value = 2000;
unsigned int return_value = 0;
struct unf_rport_scsi_id_image_s *scsi_image_table = NULL;
UNF_CHECK_VALID(0x936, UNF_TRUE, v_lport, return);
UNF_CHECK_VALID(0x937, UNF_TRUE, v_xchg, return);
spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
if (v_xchg->io_state & INI_IO_STATE_UPABORT) {
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"LPort(0x%x) xchange(0x%p) OX_ID(0x%x), RX_ID(0x%x) Cmdsn(0x%lx) has been aborted.",
lport->port_id, v_xchg, v_xchg->ox_id,
v_xchg->rx_id, (unsigned long)v_xchg->cmnd_sn);
return;
}
v_xchg->io_state |= INI_IO_STATE_UPABORT;
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_NORMAL, UNF_KEVENT,
"LPort(0x%x) exchg(0x%p) OX_ID(0x%x) RX_ID(0x%x) Cmdsn(0x%lx) timeout abort it",
lport->port_id, v_xchg, v_xchg->ox_id,
v_xchg->rx_id, (unsigned long)v_xchg->cmnd_sn);
lport->xchg_mgr_temp.pfn_unf_xchg_add_timer(
(void *)v_xchg,
(unsigned long)UNF_WAIT_ABTS_RSP_TIMEOUT,
UNF_TIMER_TYPE_INI_ABTS);
sema_init(&v_xchg->task_sema, 0);
scsi_cmnd.scsi_id = xchg->scsi_cmnd_info.scsi_id;
scsi_cmnd.upper_cmnd = xchg->scsi_cmnd_info.scsi_cmnd;
scsi_cmnd.pfn_done = xchg->scsi_cmnd_info.pfn_done;
scsi_image_table = &lport->rport_scsi_table;
if (unf_send_abts(lport, v_xchg) != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"LPort(0x%x) send ABTS, Send ABTS unsuccessful. Exchange OX_ID(0x%x), RX_ID(0x%x).",
lport->port_id, v_xchg->ox_id,
v_xchg->rx_id);
lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)v_xchg);
spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
v_xchg->io_state &= ~INI_IO_STATE_UPABORT;
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
/* The message fails to be sent.
* It is released internally and does not
* need to be released externally.
*/
return;
}
if (down_timeout(&v_xchg->task_sema,
(long long)msecs_to_jiffies(timeout_value))) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) recv abts marker timeout,Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x)",
lport->port_id, v_xchg,
v_xchg->ox_id, v_xchg->rx_id);
lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)v_xchg);
/* Cancel the INI_IO_STATE_UPABORT flag
 * and let TMF process the I/O
 */
spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
v_xchg->io_state &= ~INI_IO_STATE_UPABORT;
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
return;
}
spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
if ((v_xchg->ucode_abts_state == UNF_IO_SUCCESS) ||
(v_xchg->scsi_cmnd_info.result == UNF_IO_ABORT_PORT_REMOVING)) {
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]Port(0x%x) Send ABTS succeed and recv marker Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) marker status(0x%x)",
lport->port_id, v_xchg,
v_xchg->ox_id, v_xchg->rx_id,
v_xchg->ucode_abts_state);
return_value = DID_BUS_BUSY;
UNF_IO_RESULT_CNT(scsi_image_table, scsi_cmnd.scsi_id,
return_value);
unf_complete_cmnd(&scsi_cmnd, DID_BUS_BUSY << 16);
return;
}
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)v_xchg);
spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
v_xchg->io_state &= ~INI_IO_STATE_UPABORT;
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Port(0x%x) send ABTS failed. Exch(0x%p) hot_tag(0x%x) ret(0x%x) v_xchg->io_state (0x%x)",
lport->port_id, v_xchg, v_xchg->hot_pool_tag,
v_xchg->scsi_cmnd_info.result, v_xchg->io_state);
}
static void unf_fc_ini_send_abts_timeout(struct unf_lport_s *lport,
struct unf_rport_s *rport,
struct unf_xchg_s *xchg)
{
if (xchg->rport_bind_jifs == rport->rport_alloc_jifs &&
xchg->rport_bind_jifs != INVALID_VALUE64) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) first time to send abts timeout, retry again OX_ID(0x%x) RX_ID(0x%x) state(0x%x)",
lport->port_id, rport->nport_id,
xchg, xchg->ox_id, xchg->rx_id, xchg->io_state);
lport->xchg_mgr_temp.pfn_unf_xchg_add_timer(
(void *)xchg,
(unsigned long)UNF_WAIT_ABTS_RSP_TIMEOUT,
UNF_TIMER_TYPE_INI_ABTS);
if (unf_send_abts(lport, xchg) != RETURN_OK) {
lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer(
(void *)xchg);
unf_abts_timeout_recovery_default(rport, xchg);
unf_cm_free_xchg(lport, xchg);
}
} else {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) rport is invalid, exchg rport jiff(0x%llx 0x%llx), free exchange OX_ID(0x%x) RX_ID(0x%x) state(0x%x)",
lport->port_id, rport->nport_id, xchg,
xchg->rport_bind_jifs, rport->rport_alloc_jifs,
xchg->ox_id, xchg->rx_id, xchg->io_state);
unf_cm_free_xchg(lport, xchg);
}
}
static void unf_fc_ini_io_rec_wait_timeout(struct unf_lport_s *lport,
struct unf_rport_s *rport,
struct unf_xchg_s *xchg)
{
unsigned long io_time_out = 0;
if (xchg->rport_bind_jifs == rport->rport_alloc_jifs) {
unf_send_rec(lport, rport, xchg);
if (xchg->scsi_cmnd_info.abort_timeout > 0) {
io_time_out =
(xchg->scsi_cmnd_info.abort_timeout >
UNF_REC_TOV) ?
(xchg->scsi_cmnd_info.abort_timeout -
UNF_REC_TOV) : 0;
if (io_time_out > 0) {
lport->xchg_mgr_temp.pfn_unf_xchg_add_timer(
(void *)xchg,
io_time_out,
UNF_TIMER_TYPE_REQ_IO);
} else {
unf_fc_abort_timeout_cmnd(lport, xchg);
}
}
} else {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) Rec timeout exchange OX_ID(0x%x) RX_ID(0x%x) state(0x%x), bindjifs(0x%llx)no eqal Rport alloc jifs(0x%llx)",
lport->port_id, rport->nport_id,
xchg, xchg->ox_id, xchg->rx_id,
xchg->io_state, xchg->rport_bind_jifs,
rport->rport_alloc_jifs);
}
}
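/*
 * Delayed-work handler for INI I/O exchange timers. The io_state and
 * abts_state bits select the case:
 * 1. RRQSEND_ERR: waited long enough after a failed RRQ, free the
 *    exchange;
 * 2. ABORT_TIMEOUT without ABTS response: second ABTS timed out, run
 *    default recovery and free the exchange;
 * 3. UPABORT without ABTS response: first ABTS timed out, retry it;
 * 4. DONE with ABTS response: I/O finished, send RRQ to recover the
 *    exchange ID (re-arm the timer if the RRQ cannot be sent);
 * 5. REC_TIMEOUT_WAIT: the REC wait expired;
 * otherwise the plain I/O timer fired and the command is aborted.
 */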
static void unf_fc_ini_io_xchg_timeout(struct work_struct *v_work)
{
struct unf_xchg_s *xchg = NULL;
struct unf_lport_s *lport = NULL;
struct unf_rport_s *rport = NULL;
unsigned long flags = 0;
unsigned int ret = UNF_RETURN_ERROR;
unsigned int port_invalid_flag = 0;
UNF_REFERNCE_VAR(ret);
xchg = container_of(v_work, struct unf_xchg_s, timeout_work.work);
UNF_CHECK_VALID(0x939, UNF_TRUE, xchg, return);
ret = unf_xchg_ref_inc(xchg, INI_IO_TIMEOUT);
UNF_CHECK_VALID(0x940, UNF_TRUE, ret == RETURN_OK, return);
lport = xchg->lport;
rport = xchg->rport;
port_invalid_flag = !lport || !rport;
if (port_invalid_flag) {
unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT);
unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT);
return;
}
spin_lock_irqsave(&xchg->xchg_state_lock, flags);
/* 1. for Send RRQ failed Timer timeout */
if (INI_IO_STATE_RRQSEND_ERR & xchg->io_state) {
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[info]LPort(0x%x) RPort(0x%x) Exch(0x%p) had wait enough time for RRQ send failed OX_ID(0x%x) RX_ID(0x%x) state(0x%x)",
lport->port_id, rport->nport_id,
xchg, xchg->ox_id, xchg->rx_id, xchg->io_state);
unf_cm_free_xchg(lport, xchg);
}
/* Second ABTS timeout and enter LOGO process */
else if ((INI_IO_STATE_ABORT_TIMEOUT & xchg->io_state) &&
(!(ABTS_RESPONSE_RECEIVED & xchg->abts_state))) {
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) had wait enough time for second abts send OX_ID(0x%x) RX_ID(0x%x) state(0x%x)",
lport->port_id, rport->nport_id,
xchg, xchg->ox_id, xchg->rx_id,
xchg->io_state);
unf_abts_timeout_recovery_default(rport, xchg);
unf_cm_free_xchg(lport, xchg);
}
/* First time to send ABTS, timeout and retry to send ABTS again */
else if ((xchg->io_state & INI_IO_STATE_UPABORT) &&
(!(xchg->abts_state & ABTS_RESPONSE_RECEIVED))) {
xchg->io_state |= INI_IO_STATE_ABORT_TIMEOUT;
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
unf_fc_ini_send_abts_timeout(lport, rport, xchg);
}
/* 3. IO_DONE */
else if ((xchg->io_state & INI_IO_STATE_DONE) &&
(xchg->abts_state & ABTS_RESPONSE_RECEIVED)) {
/*
* for IO_DONE:
* 1. INI ABTS first timer time out
* 2. INI RCVD ABTS Response
* 3. Normal case for I/O Done
*/
/* Send ABTS & RCVD RSP & no timeout */
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
/* Send RRQ */
if (unf_send_rrq(lport, rport, xchg) == RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT,
UNF_MAJOR,
"[info]LPort(0x%x) send RRQ succeed to RPort(0x%x) Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) state(0x%x)",
lport->port_id, rport->nport_id, xchg,
xchg->ox_id, xchg->rx_id, xchg->io_state);
} else {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT,
UNF_WARN,
"[warn]LPort(0x%x) can't send RRQ to RPort(0x%x) Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) state(0x%x)",
lport->port_id, rport->nport_id, xchg,
xchg->ox_id, xchg->rx_id, xchg->io_state);
spin_lock_irqsave(&xchg->xchg_state_lock, flags);
xchg->io_state |= INI_IO_STATE_RRQSEND_ERR;
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
lport->xchg_mgr_temp.pfn_unf_xchg_add_timer(
(void *)xchg,
(unsigned long)UNF_WRITE_RRQ_SENDERR_INTERVAL,
UNF_TIMER_TYPE_INI_IO);
}
} else if (xchg->io_state & INI_IO_STATE_REC_TIMEOUT_WAIT) {
xchg->io_state &= ~INI_IO_STATE_REC_TIMEOUT_WAIT;
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
unf_fc_ini_io_rec_wait_timeout(lport, rport, xchg);
} else {
/* 4. I/O Timer Timeout */
/* vmware */
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
unf_fc_abort_timeout_cmnd(lport, xchg);
}
unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT);
unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT);
UNF_REFERNCE_VAR(ret);
}
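/*
 * Allocate an INI I/O exchange in three steps:
 * 1. take an idle node from the free pool's FCP list;
 * 2. reserve a hot pool slab slot (returning the node to the free
 *    pool on failure) and queue the exchange on ini_busylist;
 * 3. reset the exchange state and attach the INI timeout handler.
 */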
static inline struct unf_xchg_s *unf_alloc_io_xchg(
struct unf_lport_s *v_lport,
struct unf_xchg_mgr_s *v_xchg_mgr,
unsigned int v_xchg_type,
unsigned short v_rx_id)
{
struct unf_xchg_s *xchg = NULL;
struct list_head *list_node = NULL;
struct unf_xchg_free_pool_s *free_pool = NULL;
struct unf_xchg_hot_pool_s *hot_pool = NULL;
unsigned long flags = 0;
static atomic64_t s_exhg_id;
void (*unf_fc_io_xchg_timeout)(struct work_struct *v_work) = NULL;
UNF_CHECK_VALID(0x941, UNF_TRUE, v_xchg_mgr, return NULL);
UNF_CHECK_VALID(0x942, UNF_TRUE, v_lport, return NULL);
free_pool = &v_xchg_mgr->free_pool;
hot_pool = v_xchg_mgr->hot_pool;
UNF_CHECK_VALID(0x943, UNF_TRUE, free_pool, return NULL);
UNF_CHECK_VALID(0x944, UNF_TRUE, hot_pool, return NULL);
/* 1. Free Pool */
spin_lock_irqsave(&free_pool->xchg_free_pool_lock, flags);
if (unlikely(list_empty(&free_pool->list_free_xchg_list))) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO,
"Port(0x%x) have no Exchange anymore.",
v_lport->port_id);
spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags);
return NULL;
}
/* Select an idle node from free pool */
list_node = (&free_pool->list_free_xchg_list)->next;
list_del(list_node);
free_pool->total_fcp_xchg--;
spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags);
xchg = list_entry(list_node, struct unf_xchg_s, list_xchg_entry);
/*
* Hot Pool:
* When xchg is mounted to Hot Pool, the mount mode and release mode
* of Xchg must be specified and stored in the sfs linked list.
*/
flags = 0;
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags);
if (unf_alloc_hot_pool_slab(hot_pool, xchg, v_rx_id) != RETURN_OK) {
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
unf_add_back_to_fcp_list(free_pool, xchg);
if (unlikely(v_lport->b_port_removing == UNF_TRUE))
unf_check_xchg_mgr_status(v_xchg_mgr);
return NULL;
}
list_add_tail(&xchg->list_xchg_entry, &hot_pool->ini_busylist);
unf_fc_io_xchg_timeout = unf_fc_ini_io_xchg_timeout;
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
/* 3. Exchange State */
spin_lock_irqsave(&xchg->xchg_state_lock, flags);
xchg->start_jif = atomic64_inc_return(&s_exhg_id);
xchg->xchg_mgr = v_xchg_mgr;
xchg->free_pool = free_pool;
xchg->hot_pool = hot_pool;
xchg->lport = v_lport;
xchg->xchg_type = v_xchg_type;
xchg->pfn_free_xchg = unf_free_fcp_xchg;
xchg->io_state = UNF_IO_STATE_NEW;
xchg->io_send_stage = TGT_IO_SEND_STAGE_NONE;
xchg->io_send_result = TGT_IO_SEND_RESULT_INVALID;
xchg->io_send_abort = UNF_FALSE;
xchg->io_abort_result = UNF_FALSE;
xchg->ox_id = INVALID_VALUE16;
xchg->abort_oxid = INVALID_VALUE16;
xchg->rx_id = INVALID_VALUE16;
xchg->sid = INVALID_VALUE32;
xchg->did = INVALID_VALUE32;
xchg->oid = INVALID_VALUE32;
xchg->seq_id = INVALID_VALUE8;
xchg->cmnd_code = INVALID_VALUE32;
xchg->data_len = 0;
xchg->resid_len = 0;
xchg->data_direction = DMA_NONE;
xchg->may_consume_res_cnt = 0;
xchg->fact_consume_res_cnt = 0;
xchg->io_front_jif = 0;
xchg->tmf_state = 0;
xchg->ucode_abts_state = INVALID_VALUE32;
xchg->abts_state = 0;
xchg->rport_bind_jifs = INVALID_VALUE64;
xchg->scsi_id = INVALID_VALUE32;
xchg->world_id = INVALID_VALUE32;
memset(&xchg->dif_control, 0, sizeof(struct unf_dif_control_info_s));
memset(&xchg->req_sgl_info, 0, sizeof(struct unf_req_sgl_info_s));
memset(&xchg->dif_sgl_info, 0, sizeof(struct unf_req_sgl_info_s));
memset(&xchg->abts_rsps, 0, sizeof(struct unf_abts_rsps_s));
xchg->scsi_cmnd_info.result = 0;
xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] =
(unsigned int)atomic64_inc_return(&v_lport->exchg_index);
if (xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] == 0)
xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] =
(unsigned int)atomic64_inc_return(&v_lport->exchg_index);
atomic_set(&xchg->ref_cnt, 0);
atomic_set(&xchg->delay_flag, 0);
if (delayed_work_pending(&xchg->timeout_work))
UNF_DEL_XCHG_TIMER_SAFE(xchg);
INIT_DELAYED_WORK(&xchg->timeout_work, unf_fc_io_xchg_timeout);
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
return xchg;
}
static void unf_add_back_to_sfs_list(
struct unf_xchg_free_pool_s *v_free_pool,
struct unf_xchg_s *v_xchg)
{
unsigned long flags = 0;
UNF_CHECK_VALID(0x945, UNF_TRUE, v_free_pool, return);
UNF_CHECK_VALID(0x946, UNF_TRUE, v_xchg, return);
unf_init_xchg_attribute(v_xchg);
spin_lock_irqsave(&v_free_pool->xchg_free_pool_lock, flags);
list_add_tail(&v_xchg->list_xchg_entry,
&v_free_pool->list_sfs_xchg_list);
v_free_pool->total_sfs_xchg++;
spin_unlock_irqrestore(&v_free_pool->xchg_free_pool_lock, flags);
}
static void unf_free_sfs_xchg(struct unf_xchg_s *v_xchg)
{
struct unf_xchg_free_pool_s *free_pool = NULL;
struct unf_xchg_mgr_s *xchg_mgr = NULL;
struct unf_lport_s *lport = NULL;
UNF_CHECK_VALID(0x947, UNF_TRUE, v_xchg, return);
free_pool = v_xchg->free_pool;
lport = v_xchg->lport;
xchg_mgr = v_xchg->xchg_mgr;
/* The memory is applied for when the GID_PT/GID_FT is sent.
* If no response is received, the GID_PT/GID_FT
* needs to be forcibly released.
*/
unf_free_one_big_sfs(v_xchg);
unf_add_back_to_sfs_list(free_pool, v_xchg);
if (unlikely(lport->b_port_removing == UNF_TRUE))
unf_check_xchg_mgr_status(xchg_mgr);
}
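/*
 * Arm the delayed-work timer of an exchange. RRQ timers are shortened
 * and ELS timers lengthened by the redundant-time constants, a dying
 * exchange (ref_cnt <= 0) is never timed, and a successfully queued
 * work item takes its own reference on the exchange.
 */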
static void unf_fc_xchg_add_timer(void *v_xchg,
unsigned long v_time_ms,
enum unf_timer_type_e v_en_time_type)
{
unsigned long flag = 0;
struct unf_xchg_s *xchg = NULL;
unsigned long time_ms = v_time_ms;
struct unf_lport_s *lport;
UNF_CHECK_VALID(0x948, UNF_TRUE, v_xchg, return);
xchg = (struct unf_xchg_s *)v_xchg;
lport = xchg->lport;
UNF_CHECK_VALID(0x948, UNF_TRUE, lport, return);
/* update timeout */
switch (v_en_time_type) {
case UNF_TIMER_TYPE_INI_RRQ:
time_ms = time_ms - UNF_INI_RRQ_REDUNDANT_TIME;
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT,
UNF_INFO, "INI RRQ Timer set.");
break;
case UNF_TIMER_TYPE_SFS:
time_ms = time_ms + UNF_INI_ELS_REDUNDANT_TIME;
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT,
UNF_INFO, "INI ELS Timer set.");
break;
default:
break;
}
/* The xchg of the timer must be valid.
* If the reference count of xchg is 0,
* the timer must not be added
*/
if (atomic_read(&xchg->ref_cnt) <= 0) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_KEVENT,
"[warn]Abnormal Exchange(0x%p), Reference count(0x%x), Can't add timer.",
xchg, atomic_read(&xchg->ref_cnt));
return;
}
/* Delay Work: Hold for timer */
spin_lock_irqsave(&xchg->xchg_state_lock, flag);
if (queue_delayed_work(lport->xchg_wq,
&xchg->timeout_work,
(unsigned long)
msecs_to_jiffies((unsigned int)time_ms))) {
/* hold for timer */
atomic_inc(&xchg->ref_cnt);
}
spin_unlock_irqrestore(&xchg->xchg_state_lock, flag);
}
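/*
 * Delayed-work handler for SFS exchange timers. An already-aborted
 * exchange (other than RRQ/LOGO) needs no handling; a timed-out ELS
 * reply triggers L_Port or R_Port error recovery, depending on
 * whether the peer is a domain manager; anything else falls through
 * to the exchange's ob_callback.
 */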
static void unf_sfs_xchg_timeout(struct work_struct *v_work)
{
struct unf_xchg_s *xchg = NULL;
unsigned int ret = UNF_RETURN_ERROR;
struct unf_lport_s *lport = NULL;
struct unf_rport_s *rport = NULL;
unsigned long flags = 0;
UNF_CHECK_VALID(0x949, UNF_TRUE, v_work, return);
xchg = container_of(v_work, struct unf_xchg_s, timeout_work.work);
UNF_CHECK_VALID(0x950, UNF_TRUE, xchg, return);
ret = unf_xchg_ref_inc(xchg, SFS_TIMEOUT);
UNF_REFERNCE_VAR(ret);
UNF_CHECK_VALID(0x951, UNF_TRUE, ret == RETURN_OK, return);
spin_lock_irqsave(&xchg->xchg_state_lock, flags);
lport = xchg->lport;
rport = xchg->rport;
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
unf_xchg_ref_dec(xchg, SFS_TIMEOUT);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]SFS Exch(%p) Cmnd(0x%x) IO Exch(0x%p) Sid_Did(0x%x:0x%x) HotTag(0x%x) State(0x%x) Timeout.",
xchg, xchg->cmnd_code, xchg->io_xchg, xchg->sid,
xchg->did, xchg->hot_pool_tag, xchg->io_state);
spin_lock_irqsave(&xchg->xchg_state_lock, flags);
if ((xchg->io_state & TGT_IO_STATE_ABORT) &&
(xchg->cmnd_code != ELS_RRQ) &&
(xchg->cmnd_code != ELS_LOGO)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
"SFS Exch(0x%p) Cmnd(0x%x) Hot Pool Tag(0x%x) timeout, but aborted, no need to handle.",
xchg, xchg->cmnd_code, xchg->hot_pool_tag);
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
unf_xchg_ref_dec(xchg, SFS_TIMEOUT);
unf_xchg_ref_dec(xchg, SFS_TIMEOUT);
return;
}
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
/* The sfs times out. If the sfs is ELS reply,
* go to unf_rport_error_recovery/unf_lport_error_recovery.
* Otherwise, go to the corresponding obCallback.
*/
if (UNF_XCHG_IS_ELS_REPLY(xchg) && (rport)) {
if (rport->nport_id >= UNF_FC_FID_DOM_MGR)
unf_lport_error_recovery(lport);
else
unf_rport_error_recovery(rport);
} else if (xchg->pfn_ob_callback) {
xchg->pfn_ob_callback(xchg);
} else {
/* Do nothing */
}
unf_xchg_ref_dec(xchg, SFS_TIMEOUT);
unf_xchg_ref_dec(xchg, SFS_TIMEOUT);
}
static struct unf_xchg_s *unf_alloc_sfs_xchg(struct unf_lport_s *v_lport,
struct unf_xchg_mgr_s *v_xchg_mgr,
unsigned int v_xchg_type,
unsigned short v_rx_id)
{
struct unf_xchg_s *xchg = NULL;
struct list_head *list_node = NULL;
struct unf_xchg_free_pool_s *free_pool = NULL;
struct unf_xchg_hot_pool_s *hot_pool = NULL;
unsigned long flags = 0;
UNF_CHECK_VALID(0x952, UNF_TRUE, v_lport, return NULL);
UNF_CHECK_VALID(0x953, UNF_TRUE, v_xchg_mgr, return NULL);
free_pool = &v_xchg_mgr->free_pool;
hot_pool = v_xchg_mgr->hot_pool;
UNF_CHECK_VALID(0x954, UNF_TRUE, free_pool, return NULL);
UNF_CHECK_VALID(0x955, UNF_TRUE, hot_pool, return NULL);
/* Select an idle node from free pool */
spin_lock_irqsave(&free_pool->xchg_free_pool_lock, flags);
if (list_empty(&free_pool->list_sfs_xchg_list)) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"Port(0x%x) have no Exchange anymore.",
v_lport->port_id);
spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags);
return NULL;
}
list_node = (&free_pool->list_sfs_xchg_list)->next;
list_del(list_node);
free_pool->total_sfs_xchg--;
spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags);
xchg = list_entry(list_node, struct unf_xchg_s, list_xchg_entry);
/*
* The xchg is mounted to the Hot Pool.
* The mount mode and release mode of the xchg must be specified
* and stored in the sfs linked list.
*/
flags = 0;
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags);
if (unf_alloc_hot_pool_slab(hot_pool, xchg, v_rx_id) != RETURN_OK) {
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
unf_add_back_to_sfs_list(free_pool, xchg);
if (unlikely(v_lport->b_port_removing == UNF_TRUE))
unf_check_xchg_mgr_status(v_xchg_mgr);
return NULL;
}
list_add_tail(&xchg->list_xchg_entry, &hot_pool->sfs_busylist);
hot_pool->total_xchges++;
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
spin_lock_irqsave(&xchg->xchg_state_lock, flags);
xchg->free_pool = free_pool;
xchg->hot_pool = hot_pool;
xchg->lport = v_lport;
xchg->xchg_mgr = v_xchg_mgr;
xchg->pfn_free_xchg = unf_free_sfs_xchg;
xchg->xchg_type = v_xchg_type;
xchg->io_state = UNF_IO_STATE_NEW;
xchg->scsi_cmnd_info.result = 0;
xchg->ob_callback_sts = UNF_IO_SUCCESS;
xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] =
(unsigned int)atomic64_inc_return(&v_lport->exchg_index);
if (xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] == 0)
xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] =
(unsigned int)
atomic64_inc_return(&v_lport->exchg_index);
if (delayed_work_pending(&xchg->timeout_work))
UNF_DEL_XCHG_TIMER_SAFE(xchg);
INIT_DELAYED_WORK(&xchg->timeout_work, unf_sfs_xchg_timeout);
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
return xchg;
}
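/*
 * Template entry for allocating a new exchange. The manager is chosen
 * round-robin via last_exchg_mgr_idx (or fixed to manager 0 for
 * UNF_XCHG_MGR_TYPE_FIXED), the type-specific allocator runs, and on
 * failure the next manager is tried, up to UNF_EXCHG_MGR_NUM
 * attempts. A NOP'd port allocates nothing.
 */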
static void *unf_get_new_xchg(void *v_lport, unsigned int v_xchg_type,
unsigned short v_rx_id)
{
struct unf_lport_s *lport = NULL;
struct unf_xchg_mgr_s *xchg_mgr = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned int xchg_type = 0;
unsigned short xchg_mgr_type;
unsigned int rtry_cnt = 0;
unsigned int last_exchg_mgr_idx;
xchg_mgr_type = (v_xchg_type >> 16);
xchg_type = v_xchg_type & 0xFFFF;
UNF_CHECK_VALID(0x956, UNF_TRUE, v_lport, return NULL);
/* In the case of NPIV, the v_lport is the Vport pointer,
* and the share uses the ExchMgr of the RootLport.
*/
lport = ((struct unf_lport_s *)v_lport)->root_lport;
UNF_CHECK_VALID(0x957, UNF_TRUE, (lport), return NULL);
if (unlikely((atomic_read(&lport->port_no_operater_flag) ==
UNF_LPORT_NOP) ||
(atomic_read(&((struct unf_lport_s *)v_lport)->port_no_operater_flag) ==
UNF_LPORT_NOP)))
return NULL;
last_exchg_mgr_idx =
(unsigned int)atomic64_inc_return(&lport->last_exchg_mgr_idx);
try_next_mgr:
rtry_cnt++;
if (unlikely(rtry_cnt > UNF_EXCHG_MGR_NUM))
return NULL;
/* If fixed mode, only use XchgMgr 0 */
if (unlikely(xchg_mgr_type == UNF_XCHG_MGR_TYPE_FIXED))
xchg_mgr = (struct unf_xchg_mgr_s *)lport->p_xchg_mgr[0];
else
xchg_mgr =
(struct unf_xchg_mgr_s *)
lport->p_xchg_mgr[last_exchg_mgr_idx % UNF_EXCHG_MGR_NUM];
if (unlikely(!xchg_mgr)) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%x) get exchangemgr %u is null.",
lport->port_id,
last_exchg_mgr_idx % UNF_EXCHG_MGR_NUM);
return NULL;
}
last_exchg_mgr_idx++;
/* Allocate entries based on the Exchange type */
switch (xchg_type) {
case UNF_XCHG_TYPE_SFS:
xchg = unf_alloc_sfs_xchg(v_lport, xchg_mgr, xchg_type,
INVALID_VALUE16);
break;
case UNF_XCHG_TYPE_INI:
xchg = unf_alloc_io_xchg(v_lport, xchg_mgr, xchg_type,
INVALID_VALUE16);
break;
default:
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"Port(0x%x) unwonted, Exchange type(0x%x).",
lport->port_id, xchg_type);
break;
}
if (likely(xchg)) {
xchg->ox_id = INVALID_VALUE16;
xchg->abort_oxid = INVALID_VALUE16;
xchg->rx_id = INVALID_VALUE16;
xchg->debug_hook = UNF_FALSE;
xchg->alloc_jif = jiffies;
atomic_set(&xchg->ref_cnt, 1);
atomic_set(&xchg->esgl_cnt, 0);
} else {
goto try_next_mgr;
}
return xchg;
}
static void unf_free_xchg(void *v_lport, void *v_xchg)
{
struct unf_xchg_s *xchg = NULL;
UNF_REFERNCE_VAR(v_lport);
UNF_CHECK_VALID(0x958, UNF_TRUE, (v_xchg), return);
xchg = (struct unf_xchg_s *)v_xchg;
unf_xchg_ref_dec(xchg, XCHG_FREE_XCHG);
}
void unf_release_xchg_mgr_temp(struct unf_lport_s *v_lport)
{
UNF_CHECK_VALID(0x960, UNF_TRUE, v_lport, return);
if (v_lport->dirty_flag & UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"Port(0x%x) has dirty exchange, Don't release exchange manager template.",
v_lport->port_id);
return;
}
memset(&v_lport->xchg_mgr_temp, 0,
sizeof(struct unf_cm_xchg_mgr_template_s));
v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_7_DESTROY_XCHG_MGR_TMP;
}
static void unf_xchg_abort_all_sfs_xchg(struct unf_lport_s *v_lport,
int v_clean)
{
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct list_head *xchg_node = NULL;
struct list_head *next_xchg_node = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned long pool_lock_flags = 0;
unsigned long xchg_lock_flags = 0;
unsigned int i = 0;
UNF_CHECK_VALID(0x961, UNF_TRUE, v_lport, return);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
hot_pool = unf_get_hot_pool_by_lport(v_lport, i);
if (unlikely(!hot_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT,
UNF_MAJOR,
"Port(0x%x) Hot Pool is NULL.",
v_lport->port_id);
continue;
}
if (v_clean == UNF_FALSE) {
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
/* Clearing the SFS_Busy_list Exchange Resource */
list_for_each_safe(xchg_node, next_xchg_node,
&hot_pool->sfs_busylist) {
xchg = list_entry(xchg_node, struct unf_xchg_s,
list_xchg_entry);
spin_lock_irqsave(&xchg->xchg_state_lock,
xchg_lock_flags);
if (atomic_read(&xchg->ref_cnt) > 0)
xchg->io_state |= TGT_IO_STATE_ABORT;
spin_unlock_irqrestore(&xchg->xchg_state_lock,
xchg_lock_flags);
}
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
} else {
continue;
}
}
}
static void unf_xchg_abort_ini_io_xchg(struct unf_lport_s *v_lport,
int v_clean)
{
/* Clean L_Port/V_Port Link Down I/O: Abort */
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct list_head *xchg_node = NULL;
struct list_head *next_xchg_node = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned long pool_lock_flags = 0;
unsigned long xchg_lock_flags = 0;
unsigned int io_state = 0;
unsigned int i = 0;
UNF_CHECK_VALID(0x962, UNF_TRUE, (v_lport), return);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
hot_pool = unf_get_hot_pool_by_lport(v_lport, i);
if (unlikely(!hot_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT,
UNF_WARN,
"[warn]Port(0x%x) hot pool is NULL",
v_lport->port_id);
continue;
}
if (v_clean == UNF_FALSE) {
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
/* 1. Abort INI_Busy_List IO */
list_for_each_safe(xchg_node, next_xchg_node,
&hot_pool->ini_busylist) {
xchg = list_entry(xchg_node, struct unf_xchg_s,
list_xchg_entry);
spin_lock_irqsave(&xchg->xchg_state_lock,
xchg_lock_flags);
if (atomic_read(&xchg->ref_cnt) > 0)
xchg->io_state |=
INI_IO_STATE_DRABORT | io_state;
spin_unlock_irqrestore(&xchg->xchg_state_lock,
xchg_lock_flags);
}
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
} else {
/* Do nothing, just return */
continue;
}
}
}
static void unf_xchg_abort_all_xchg(void *v_lport,
unsigned int v_xchg_type,
int v_clean)
{
struct unf_lport_s *lport = NULL;
UNF_CHECK_VALID(0x964, UNF_TRUE, v_lport, return);
lport = (struct unf_lport_s *)v_lport;
switch (v_xchg_type) {
case UNF_XCHG_TYPE_SFS:
unf_xchg_abort_all_sfs_xchg(lport, v_clean);
break;
/* Clean L_Port/V_Port Link Down I/O: Abort */
case UNF_XCHG_TYPE_INI:
unf_xchg_abort_ini_io_xchg(lport, v_clean);
break;
default:
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) unknown exch type(0x%x)",
lport->port_id, v_xchg_type);
break;
}
}
static void unf_xchg_abort_ini_send_tm_cmd(void *v_lport,
void *v_rport,
unsigned long long v_lun_id)
{
/*
* LUN Reset: set UP_ABORT tag, with:
* INI_Busy_list, IO_Wait_list,
* IO_Delay_list, IO_Delay_transfer_list
*/
struct unf_lport_s *lport = NULL;
struct unf_rport_s *rport = NULL;
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned long flags = 0;
unsigned long xchg_flag = 0;
unsigned int i = 0;
unsigned long long raw_lunid = 0;
UNF_CHECK_VALID(0x981, UNF_TRUE, v_lport, return);
UNF_CHECK_VALID(0x981, UNF_TRUE, v_rport, return);
lport = ((struct unf_lport_s *)v_lport)->root_lport;
UNF_CHECK_VALID(0x982, UNF_TRUE, (lport), return);
rport = (struct unf_rport_s *)v_rport;
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
hot_pool = unf_get_hot_pool_by_lport(lport, i);
if (unlikely(!hot_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%x) hot pool is NULL",
lport->port_id);
continue;
}
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags);
/* 1. for each exchange from busy list */
list_for_each_safe(node, next_node,
&hot_pool->ini_busylist) {
xchg = list_entry(node, struct unf_xchg_s,
list_xchg_entry);
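/* Recover the 16-bit LUN number embedded in the 8-byte
 * FCP LUN field so it can be compared with the LUN
 * targeted by this TMF
 */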
raw_lunid = *(unsigned long long *)
(xchg->fcp_cmnd.lun) >> 16 &
0x000000000000ffff;
if ((v_lun_id == raw_lunid) &&
(rport == xchg->rport)) {
spin_lock_irqsave(&xchg->xchg_state_lock,
xchg_flag);
xchg->io_state |= INI_IO_STATE_TMF_ABORT;
spin_unlock_irqrestore(&xchg->xchg_state_lock,
xchg_flag);
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT,
UNF_MAJOR,
"[info]Exchange(%p) state(0x%x) S_ID(0x%x) D_ID(0x%x) tag(0x%x) abort by TMF CMD",
xchg, xchg->io_state, lport->nport_id,
rport->nport_id, xchg->hot_pool_tag);
}
}
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
}
}
static void unf_xchg_abort_by_lun(void *v_lport,
void *v_rport,
unsigned long long v_lun_id,
void *v_tm_xchg,
int v_abort_all_lun_flag)
{
/* ABORT: set UP_ABORT tag for target LUN I/O */
struct unf_xchg_s *tm_xchg = (struct unf_xchg_s *)v_tm_xchg;
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[event]Port(0x%x) LUN_ID(0x%llx) TM_EXCH(0x%p) flag(%d)",
((struct unf_lport_s *)v_lport)->port_id,
v_lun_id, v_tm_xchg, v_abort_all_lun_flag);
/* for INI Mode */
if (!tm_xchg) {
/*
* LUN Reset: set UP_ABORT tag, with:
* INI_Busy_list, IO_Wait_list,
* IO_Delay_list, IO_Delay_transfer_list
*/
unf_xchg_abort_ini_send_tm_cmd(v_lport, v_rport, v_lun_id);
return;
}
}
static void unf_xchg_abort_ini_tmf_target_reset(void *v_lport, void *v_rport)
{
/*
* Target Reset: set UP_ABORT tag, with:
* INI_Busy_list, IO_Wait_list,
* IO_Delay_list, IO_Delay_transfer_list
*/
struct unf_lport_s *lport = NULL;
struct unf_rport_s *rport = NULL;
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned long flags = 0;
unsigned long xchg_flag = 0;
unsigned int i = 0;
UNF_CHECK_VALID(0x981, UNF_TRUE, v_lport, return);
UNF_CHECK_VALID(0x981, UNF_TRUE, v_rport, return);
lport = ((struct unf_lport_s *)v_lport)->root_lport;
UNF_CHECK_VALID(0x982, UNF_TRUE, (lport), return);
rport = (struct unf_rport_s *)v_rport;
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
hot_pool = unf_get_hot_pool_by_lport(lport, i);
if (unlikely(!hot_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%x) hot pool is NULL",
lport->port_id);
continue;
}
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags);
/* 1. for each exchange from busy_list */
list_for_each_safe(node, next_node,
&hot_pool->ini_busylist) {
xchg = list_entry(node, struct unf_xchg_s,
list_xchg_entry);
if (rport == xchg->rport) {
spin_lock_irqsave(&xchg->xchg_state_lock,
xchg_flag);
xchg->io_state |= INI_IO_STATE_TMF_ABORT;
spin_unlock_irqrestore(&xchg->xchg_state_lock,
xchg_flag);
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT,
UNF_MAJOR,
"[info]Exchange(%p) state(0x%x) S_ID(0x%x) D_ID(0x%x) tag(0x%x) abort by TMF CMD",
xchg, xchg->io_state,
lport->nport_id,
rport->nport_id, xchg->hot_pool_tag);
}
}
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
}
}
static void unf_xchg_abort_by_session(void *v_lport, void *v_rport)
{
/*
* Session Reset: set UP_ABORT tag, with:
* INI_Busy_list, IO_Wait_list,
* IO_Delay_list, IO_Delay_transfer_list
*/
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[event]Port(0x%x) Rport(0x%x) start session reset with TMF",
((struct unf_lport_s *)v_lport)->port_id,
((struct unf_rport_s *)v_rport)->nport_id);
unf_xchg_abort_ini_tmf_target_reset(v_lport, v_rport);
}
static void unf_ini_busy_io_xchg_abort(void *v_hot_pool, void *v_rport,
unsigned int v_sid, unsigned int v_did,
unsigned int v_extra_io_state)
{
/*
* for target session: Set (DRV) ABORT
* 1. R_Port remove
* 2. Send PLOGI_ACC callback
* 3. RCVD PLOGI
* 4. RCVD LOGO
*/
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct unf_xchg_s *xchg = NULL;
struct list_head *xchg_node = NULL;
struct list_head *next_xchg_node = NULL;
struct unf_rport_s *rport = NULL;
unsigned long xchg_lock_flags = 0;
rport = (struct unf_rport_s *)v_rport;
hot_pool = (struct unf_xchg_hot_pool_s *)v_hot_pool;
/* ABORT INI IO: INI_BUSY_LIST */
list_for_each_safe(xchg_node, next_xchg_node,
&hot_pool->ini_busylist) {
xchg = list_entry(xchg_node, struct unf_xchg_s,
list_xchg_entry);
spin_lock_irqsave(&xchg->xchg_state_lock, xchg_lock_flags);
if ((v_did == xchg->did) && (v_sid == xchg->sid) &&
(rport == xchg->rport) &&
(atomic_read(&xchg->ref_cnt) > 0)) {
xchg->scsi_cmnd_info.result =
UNF_SCSI_HOST(DID_IMM_RETRY);
xchg->io_state |= INI_IO_STATE_DRABORT;
xchg->io_state |= v_extra_io_state;
UNF_TRACE(UNF_EVTLOG_IO_INFO,
UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]Abort INI:0x%p, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, %llu.",
xchg,
(unsigned int)xchg->hot_pool_tag,
(unsigned int)xchg->xchg_type,
(unsigned int)xchg->ox_id,
(unsigned int)xchg->rx_id,
(unsigned int)xchg->sid,
(unsigned int)xchg->did,
(unsigned int)xchg->io_state,
atomic_read(&xchg->ref_cnt),
xchg->alloc_jif);
}
spin_unlock_irqrestore(&xchg->xchg_state_lock,
xchg_lock_flags);
}
}
static void unf_xchg_mgr_io_xchg_abort(void *v_lport, void *v_rport,
unsigned int v_sid, unsigned int v_did,
unsigned int v_extra_io_state)
{
/*
* for target session: set ABORT
* 1. R_Port remove
* 2. Send PLOGI_ACC callback
* 3. RCVD PLOGI
* 4. RCVD LOGO
*/
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct unf_lport_s *lport = NULL;
unsigned long pool_lock_flags = 0;
unsigned int i = 0;
UNF_CHECK_VALID(0x983, UNF_TRUE, v_lport, return);
lport = ((struct unf_lport_s *)v_lport)->root_lport;
UNF_CHECK_VALID(0x984, UNF_TRUE, lport, return);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
hot_pool = unf_get_hot_pool_by_lport(lport, i);
if (unlikely(!hot_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN,
UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) hot pool is NULL",
lport->port_id);
continue;
}
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
/* 1. Clear INI (session) IO: INI Mode */
unf_ini_busy_io_xchg_abort(hot_pool, v_rport, v_sid,
v_did, v_extra_io_state);
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
}
}
static void unf_xchg_mgr_sfs_xchg_abort(void *v_lport, void *v_rport,
unsigned int v_sid, unsigned int v_did)
{
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct list_head *xchg_node = NULL;
struct list_head *next_xchg_node = NULL;
struct unf_xchg_s *xchg = NULL;
struct unf_lport_s *lport = NULL;
struct unf_rport_s *rport = NULL;
unsigned long pool_lock_flags = 0;
unsigned long xchg_lock_flags = 0;
unsigned int i = 0;
UNF_CHECK_VALID(0x991, UNF_TRUE, (v_lport), return);
lport = ((struct unf_lport_s *)v_lport)->root_lport;
UNF_CHECK_VALID(0x992, UNF_TRUE, (lport), return);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
hot_pool = unf_get_hot_pool_by_lport(lport, i);
if (!hot_pool) {
UNF_TRACE(UNF_EVTLOG_IO_INFO,
UNF_LOG_IO_ATT, UNF_MAJOR,
"Port(0x%x) Hot Pool is NULL.",
lport->port_id);
continue;
}
rport = (struct unf_rport_s *)v_rport;
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
/* Clear the SFS exchange of the corresponding connection */
list_for_each_safe(xchg_node, next_xchg_node,
&hot_pool->sfs_busylist) {
xchg = list_entry(xchg_node, struct unf_xchg_s,
list_xchg_entry);
spin_lock_irqsave(&xchg->xchg_state_lock,
xchg_lock_flags);
if ((v_did == xchg->did) && (v_sid == xchg->sid) &&
(rport == xchg->rport) &&
(atomic_read(&xchg->ref_cnt) > 0)) {
xchg->io_state |= TGT_IO_STATE_ABORT;
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT,
UNF_MAJOR,
"Abort SFS:0x%p---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.",
xchg,
(unsigned int)xchg->hot_pool_tag,
(unsigned int)xchg->xchg_type,
(unsigned int)xchg->ox_id,
(unsigned int)xchg->rx_id,
(unsigned int)xchg->sid,
(unsigned int)xchg->did,
(unsigned int)xchg->io_state,
atomic_read(&xchg->ref_cnt),
xchg->alloc_jif);
}
spin_unlock_irqrestore(&xchg->xchg_state_lock,
xchg_lock_flags);
}
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
}
}
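/*
 * Install the exchange manager template: a table of function
 * pointers through which the upper CM layer allocates, releases,
 * looks up, times and aborts exchanges without calling into this
 * file directly. As an illustrative sketch only (the real call
 * sites live elsewhere in the stack), a caller could allocate and
 * release an SFS exchange through the template roughly like:
 *
 *   struct unf_xchg_s *xchg =
 *       lport->xchg_mgr_temp.pfn_unf_xchg_get_free_and_init(
 *           lport, UNF_XCHG_TYPE_SFS, INVALID_VALUE16);
 *   if (xchg)
 *       lport->xchg_mgr_temp.pfn_unf_xchg_release(lport, xchg);
 */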
unsigned int unf_init_xchg_mgr_temp(struct unf_lport_s *v_lport)
{
UNF_CHECK_VALID(0x959, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
v_lport->xchg_mgr_temp.pfn_unf_xchg_get_free_and_init =
unf_get_new_xchg;
v_lport->xchg_mgr_temp.pfn_unf_xchg_release = unf_free_xchg;
v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag =
unf_lookup_xchg_by_tag;
v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_id =
unf_find_xchg_by_oxid;
v_lport->xchg_mgr_temp.pfn_unf_xchg_add_timer =
unf_fc_xchg_add_timer;
v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer =
unf_xchg_cancel_timer;
v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_all_io =
unf_xchg_abort_all_xchg;
v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_cmnd_sn =
unf_lookup_xchg_by_cmnd_sn;
v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_by_lun =
unf_xchg_abort_by_lun;
v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_by_session =
unf_xchg_abort_by_session;
v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_io_xchg_abort =
unf_xchg_mgr_io_xchg_abort;
v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_sfs_xchg_abort =
unf_xchg_mgr_sfs_xchg_abort;
return RETURN_OK;
}
void unf_set_hot_pool_wait_state(struct unf_lport_s *v_lport,
enum int_e v_wait_state)
{
struct unf_xchg_hot_pool_s *hot_pool = NULL;
unsigned long pool_lock_flags = 0;
unsigned int i = 0;
UNF_CHECK_VALID(0x965, UNF_TRUE, v_lport, return);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
hot_pool = unf_get_hot_pool_by_lport(v_lport, i);
if (unlikely(!hot_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT,
UNF_WARN,
"[warn]Port(0x%x) hot pool is NULL",
v_lport->port_id);
continue;
}
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
hot_pool->wait_state = v_wait_state;
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
}
}
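/*
 * Take a reference on an exchange. The increment only succeeds while
 * ref_cnt is positive and the hot pool slab still maps the exchange's
 * tag back to this very exchange, which guards against racing with a
 * concurrent free and reuse of the same tag.
 */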
unsigned int unf_xchg_ref_inc(struct unf_xchg_s *v_xchg,
enum unf_ioflow_id_e v_io_stage)
{
struct unf_xchg_hot_pool_s *hot_pool = NULL;
unsigned long flags = 0;
unsigned int ret = UNF_RETURN_ERROR;
UNF_CHECK_VALID(0x967, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR);
if (unlikely(v_xchg->debug_hook == UNF_TRUE)) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]Xchg(0x%p) State(0x%x) SID_DID(0x%x_0x%x) OX_ID_RX_ID(0x%x_0x%x) AllocJiff(%llu) Refcnt(%d) Stage(%s)",
v_xchg, v_xchg->io_state, v_xchg->sid,
v_xchg->did, v_xchg->ox_id, v_xchg->rx_id,
v_xchg->alloc_jif, atomic_read(&v_xchg->ref_cnt),
io_stage[v_io_stage].stage);
}
hot_pool = v_xchg->hot_pool;
UNF_CHECK_VALID(0x968, UNF_TRUE, hot_pool, return UNF_RETURN_ERROR);
UNF_REFERNCE_VAR(v_io_stage);
/* Exchange -> Hot Pool Tag check */
if (unlikely((v_xchg->hot_pool_tag >=
(hot_pool->slab_total_sum + hot_pool->base)) ||
(v_xchg->hot_pool_tag < hot_pool->base))) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Xchg(0x%p) S_ID(%xh) D_ID(0x%x) hot_pool_tag(0x%x) is bigger than slab total num(0x%x) base(0x%x)",
v_xchg, v_xchg->sid, v_xchg->did,
v_xchg->hot_pool_tag,
hot_pool->slab_total_sum + hot_pool->base,
hot_pool->base);
return UNF_RETURN_ERROR;
}
/* atomic read & inc */
spin_lock_irqsave(&v_xchg->xchg_state_lock, flags);
if (unlikely(atomic_read(&v_xchg->ref_cnt) <= 0)) {
ret = UNF_RETURN_ERROR;
} else {
if (unf_get_xchg_by_xchg_tag(hot_pool,
v_xchg->hot_pool_tag -
hot_pool->base) ==
v_xchg) {
atomic_inc(&v_xchg->ref_cnt);
ret = RETURN_OK;
} else {
ret = UNF_RETURN_ERROR;
}
}
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags);
return ret;
}
void unf_xchg_ref_dec(struct unf_xchg_s *v_xchg,
enum unf_ioflow_id_e v_io_stage)
{
/* Atomic dec ref_cnt & test, free exchange
* if necessary (ref_cnt==0)
*/
struct unf_xchg_hot_pool_s *hot_pool = NULL;
void (*pfn_free_xchg)(struct unf_xchg_s *) = NULL;
unsigned long flags = 0;
unsigned long xchg_lock_flags = 0;
UNF_CHECK_VALID(0x969, UNF_TRUE, (v_xchg), return);
if (v_xchg->debug_hook == UNF_TRUE) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]Xchg(0x%p) State(0x%x) SID_DID(0x%x_0x%x) OXID_RXID(0x%x_0x%x) AllocJiff(%llu) Refcnt(%d) Statge %s",
v_xchg, v_xchg->io_state, v_xchg->sid,
v_xchg->did, v_xchg->ox_id, v_xchg->rx_id,
v_xchg->alloc_jif, atomic_read(&v_xchg->ref_cnt),
io_stage[v_io_stage].stage);
}
hot_pool = v_xchg->hot_pool;
UNF_CHECK_VALID(0x970, UNF_TRUE, hot_pool, return);
UNF_CHECK_VALID(0x970, UNF_TRUE,
v_xchg->hot_pool_tag >= hot_pool->base, return);
UNF_REFERNCE_VAR(v_io_stage);
/*
* 1. Atomic dec & test
* 2. Free exchange if necessary (ref_cnt == 0)
*/
spin_lock_irqsave(&v_xchg->xchg_state_lock, xchg_lock_flags);
if (atomic_dec_and_test(&v_xchg->ref_cnt)) {
pfn_free_xchg = v_xchg->pfn_free_xchg;
spin_unlock_irqrestore(&v_xchg->xchg_state_lock,
xchg_lock_flags);
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags);
unf_hot_pool_slab_set(hot_pool,
v_xchg->hot_pool_tag - hot_pool->base,
NULL);
/* Delete exchange list entry */
list_del_init(&v_xchg->list_xchg_entry);
hot_pool->total_xchges--;
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
// unf_free_fcp_xchg --->>> unf_done_ini_xchg
if (pfn_free_xchg)
pfn_free_xchg(v_xchg);
} else {
spin_unlock_irqrestore(&v_xchg->xchg_state_lock,
xchg_lock_flags);
}
}
bool unf_busy_io_completed(struct unf_lport_s *v_lport)
{
struct unf_xchg_mgr_s *xchg_mgr = NULL;
unsigned long pool_lock_flags = 0;
unsigned int i;
UNF_CHECK_VALID(0x5841, UNF_TRUE, v_lport, return UNF_TRUE);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
xchg_mgr = unf_get_xchg_mgr_by_lport(v_lport, i);
if (unlikely(!xchg_mgr)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) Exchange Manager is NULL",
v_lport->port_id);
continue;
}
spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
if (!list_empty(&xchg_mgr->hot_pool->ini_busylist)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT,
UNF_INFO, "[info]Port(0x%x) ini busylist is not empty.",
v_lport->port_id);
spin_unlock_irqrestore(
&xchg_mgr->hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
return UNF_FALSE;
}
spin_unlock_irqrestore(
&xchg_mgr->hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
}
return UNF_TRUE;
}