- /*
 
-  *  CFQ, or complete fairness queueing, disk scheduler.
 
-  *
 
-  *  Based on ideas from a previously unfinished io
 
-  *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 
-  *
 
-  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 
-  */
 
- #include <linux/module.h>
 
- #include <linux/slab.h>
 
- #include <linux/blkdev.h>
 
- #include <linux/elevator.h>
 
- #include <linux/jiffies.h>
 
- #include <linux/rbtree.h>
 
- #include <linux/ioprio.h>
 
- #include <linux/blktrace_api.h>
 
- #include "blk.h"
 
- #include "blk-cgroup.h"
 
- /*
 
-  * tunables
 
-  */
 
- /* max queue in one round of service */
 
- static const int cfq_quantum = 8;
 
- static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
 
- /* maximum backwards seek, in KiB */
 
- static const int cfq_back_max = 16 * 1024;
 
- /* penalty of a backwards seek */
 
- static const int cfq_back_penalty = 2;
 
- static const int cfq_slice_sync = HZ / 10;
 
- static int cfq_slice_async = HZ / 25;
 
- static const int cfq_slice_async_rq = 2;
 
- static int cfq_slice_idle = HZ / 125;
 
- static int cfq_group_idle = HZ / 125;
 
- static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
 
- static const int cfq_hist_divisor = 4;
 
- /*
 
-  * offset from end of service tree
 
-  */
 
- #define CFQ_IDLE_DELAY		(HZ / 5)
 
- /*
 
-  * below this threshold, we consider thinktime immediate
 
-  */
 
- #define CFQ_MIN_TT		(2)
 
- #define CFQ_SLICE_SCALE		(5)
 
- #define CFQ_HW_QUEUE_MIN	(5)
 
- #define CFQ_SERVICE_SHIFT       12
 
- #define CFQQ_SEEK_THR		(sector_t)(8 * 100)
 
- #define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
 
- #define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
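
- /*
 
-  * A queue is flagged seeky when more than 32/8 (i.e. 4) of the 32 most
 
-  * recently sampled requests tracked in cfqq->seek_history were seeks.
 
-  */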
 
- #define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
 
- #define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
 
- #define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
 
- #define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])
 
- static struct kmem_cache *cfq_pool;
 
- #define CFQ_PRIO_LISTS		IOPRIO_BE_NR
 
- #define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
 
- #define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
 
- #define sample_valid(samples)	((samples) > 80)
 
- #define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)
 
- struct cfq_ttime {
 
- 	unsigned long last_end_request;
 
- 	unsigned long ttime_total;
 
- 	unsigned long ttime_samples;
 
- 	unsigned long ttime_mean;
 
- };
 
- /*
 
-  * Most of our rbtree usage is for sorting with min extraction, so
 
-  * if we cache the leftmost node we don't have to walk down the tree
 
-  *  to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 
-  * move this into the elevator for the rq sorting as well.
 
-  */
 
- struct cfq_rb_root {
 
- 	struct rb_root rb;
 
- 	struct rb_node *left;
 
- 	unsigned count;
 
- 	unsigned total_weight;
 
- 	u64 min_vdisktime;
 
- 	struct cfq_ttime ttime;
 
- };
 
- #define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, \
 
- 			.ttime = {.last_end_request = jiffies,},}
 
- /*
 
-  * Per process-grouping structure
 
-  */
 
- struct cfq_queue {
 
- 	/* reference count */
 
- 	int ref;
 
- 	/* various state flags, see below */
 
- 	unsigned int flags;
 
- 	/* parent cfq_data */
 
- 	struct cfq_data *cfqd;
 
- 	/* service_tree member */
 
- 	struct rb_node rb_node;
 
- 	/* service_tree key */
 
- 	unsigned long rb_key;
 
- 	/* prio tree member */
 
- 	struct rb_node p_node;
 
- 	/* prio tree root we belong to, if any */
 
- 	struct rb_root *p_root;
 
- 	/* sorted list of pending requests */
 
- 	struct rb_root sort_list;
 
- 	/* if fifo isn't expired, next request to serve */
 
- 	struct request *next_rq;
 
- 	/* requests queued in sort_list */
 
- 	int queued[2];
 
- 	/* currently allocated requests */
 
- 	int allocated[2];
 
- 	/* fifo list of requests in sort_list */
 
- 	struct list_head fifo;
 
- 	/* time when queue got scheduled in to dispatch first request. */
 
- 	unsigned long dispatch_start;
 
- 	unsigned int allocated_slice;
 
- 	unsigned int slice_dispatch;
 
- 	/* time when first request from queue completed and slice started. */
 
- 	unsigned long slice_start;
 
- 	unsigned long slice_end;
 
- 	long slice_resid;
 
- 	/* pending priority requests */
 
- 	int prio_pending;
 
- 	/* number of requests that are on the dispatch list or inside driver */
 
- 	int dispatched;
 
- 	/* io prio of this group */
 
- 	unsigned short ioprio, org_ioprio;
 
- 	unsigned short ioprio_class;
 
- 	pid_t pid;
 
- 	u32 seek_history;
 
- 	sector_t last_request_pos;
 
- 	struct cfq_rb_root *service_tree;
 
- 	struct cfq_queue *new_cfqq;
 
- 	struct cfq_group *cfqg;
 
- 	/* Number of sectors dispatched from queue in single dispatch round */
 
- 	unsigned long nr_sectors;
 
- };
 
- /*
 
-  * First index in the service_trees.
 
-  * IDLE is handled separately, so it has negative index
 
-  */
 
- enum wl_prio_t {
 
- 	BE_WORKLOAD = 0,
 
- 	RT_WORKLOAD = 1,
 
- 	IDLE_WORKLOAD = 2,
 
- 	CFQ_PRIO_NR,
 
- };
 
- /*
 
-  * Second index in the service_trees.
 
-  */
 
- enum wl_type_t {
 
- 	ASYNC_WORKLOAD = 0,
 
- 	SYNC_NOIDLE_WORKLOAD = 1,
 
- 	SYNC_WORKLOAD = 2
 
- };
 
- struct cfqg_stats {
 
- #ifdef CONFIG_CFQ_GROUP_IOSCHED
 
- 	/* total bytes transferred */
 
- 	struct blkg_rwstat		service_bytes;
 
- 	/* total IOs serviced, post merge */
 
- 	struct blkg_rwstat		serviced;
 
- 	/* number of ios merged */
 
- 	struct blkg_rwstat		merged;
 
- 	/* total time spent on device in ns, may not be accurate w/ queueing */
 
- 	struct blkg_rwstat		service_time;
 
- 	/* total time spent waiting in scheduler queue in ns */
 
- 	struct blkg_rwstat		wait_time;
 
- 	/* number of IOs queued up */
 
- 	struct blkg_rwstat		queued;
 
- 	/* total sectors transferred */
 
- 	struct blkg_stat		sectors;
 
- 	/* total disk time and nr sectors dispatched by this group */
 
- 	struct blkg_stat		time;
 
- #ifdef CONFIG_DEBUG_BLK_CGROUP
 
- 	/* time not charged to this cgroup */
 
- 	struct blkg_stat		unaccounted_time;
 
- 	/* sum of number of ios queued across all samples */
 
- 	struct blkg_stat		avg_queue_size_sum;
 
- 	/* count of samples taken for average */
 
- 	struct blkg_stat		avg_queue_size_samples;
 
- 	/* how many times this group has been removed from service tree */
 
- 	struct blkg_stat		dequeue;
 
- 	/* total time spent waiting for it to be assigned a timeslice. */
 
- 	struct blkg_stat		group_wait_time;
 
- 	/* time spent idling for this blkcg_gq */
 
- 	struct blkg_stat		idle_time;
 
- 	/* total time with empty current active q with other requests queued */
 
- 	struct blkg_stat		empty_time;
 
- 	/* fields after this shouldn't be cleared on stat reset */
 
- 	uint64_t			start_group_wait_time;
 
- 	uint64_t			start_idle_time;
 
- 	uint64_t			start_empty_time;
 
- 	uint16_t			flags;
 
- #endif	/* CONFIG_DEBUG_BLK_CGROUP */
 
- #endif	/* CONFIG_CFQ_GROUP_IOSCHED */
 
- };
 
- /* This is per cgroup per device grouping structure */
 
- struct cfq_group {
 
- 	/* must be the first member */
 
- 	struct blkg_policy_data pd;
 
- 	/* group service_tree member */
 
- 	struct rb_node rb_node;
 
- 	/* group service_tree key */
 
- 	u64 vdisktime;
 
- 	unsigned int weight;
 
- 	unsigned int new_weight;
 
- 	unsigned int dev_weight;
 
- 	/* number of cfqq currently on this group */
 
- 	int nr_cfqq;
 
- 	/*
 
- 	 * Per group busy queues average. Useful for workload slice calc. We
 
- 	 * create the array for each prio class but at run time it is used
 
- 	 * only for the RT and BE classes; the slot for the IDLE class remains unused.
 
- 	 * This is primarily done to avoid confusion and a gcc warning.
 
- 	 */
 
- 	unsigned int busy_queues_avg[CFQ_PRIO_NR];
 
- 	/*
 
- 	 * rr lists of queues with requests. We maintain service trees for
 
- 	 * RT and BE classes. These trees are subdivided in subclasses
 
- 	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
 
- 	 * class there is no subclassification and all the cfq queues go on
 
- 	 * a single tree service_tree_idle.
 
- 	 * Counts are embedded in the cfq_rb_root
 
- 	 */
 
- 	struct cfq_rb_root service_trees[2][3];
 
- 	struct cfq_rb_root service_tree_idle;
 
- 	unsigned long saved_workload_slice;
 
- 	enum wl_type_t saved_workload;
 
- 	enum wl_prio_t saved_serving_prio;
 
- 	/* number of requests that are on the dispatch list or inside driver */
 
- 	int dispatched;
 
- 	struct cfq_ttime ttime;
 
- 	struct cfqg_stats stats;
 
- };
 
- struct cfq_io_cq {
 
- 	struct io_cq		icq;		/* must be the first member */
 
- 	struct cfq_queue	*cfqq[2];
 
- 	struct cfq_ttime	ttime;
 
- 	int			ioprio;		/* the current ioprio */
 
- #ifdef CONFIG_CFQ_GROUP_IOSCHED
 
- 	uint64_t		blkcg_id;	/* the current blkcg ID */
 
- #endif
 
- };
 
- /*
 
-  * Per block device queue structure
 
-  */
 
- struct cfq_data {
 
- 	struct request_queue *queue;
 
- 	/* Root service tree for cfq_groups */
 
- 	struct cfq_rb_root grp_service_tree;
 
- 	struct cfq_group *root_group;
 
- 	/*
 
- 	 * The priority currently being served
 
- 	 */
 
- 	enum wl_prio_t serving_prio;
 
- 	enum wl_type_t serving_type;
 
- 	unsigned long workload_expires;
 
- 	struct cfq_group *serving_group;
 
- 	/*
 
- 	 * Each priority tree is sorted by next_request position.  These
 
- 	 * trees are used when determining if two or more queues are
 
- 	 * interleaving requests (see cfq_close_cooperator).
 
- 	 */
 
- 	struct rb_root prio_trees[CFQ_PRIO_LISTS];
 
- 	unsigned int busy_queues;
 
- 	unsigned int busy_sync_queues;
 
- 	int rq_in_driver;
 
- 	int rq_in_flight[2];
 
- 	/*
 
- 	 * queue-depth detection
 
- 	 */
 
- 	int rq_queued;
 
- 	int hw_tag;
 
- 	/*
 
- 	 * hw_tag can be
 
- 	 * -1 => indeterminate (cfq will behave as if NCQ is present, to allow better detection)
 
- 	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
 
- 	 *  0 => no NCQ
 
- 	 */
 
- 	int hw_tag_est_depth;
 
- 	unsigned int hw_tag_samples;
 
- 	/*
 
- 	 * idle window management
 
- 	 */
 
- 	struct timer_list idle_slice_timer;
 
- 	struct work_struct unplug_work;
 
- 	struct cfq_queue *active_queue;
 
- 	struct cfq_io_cq *active_cic;
 
- 	/*
 
- 	 * async queue for each priority case
 
- 	 */
 
- 	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
 
- 	struct cfq_queue *async_idle_cfqq;
 
- 	sector_t last_position;
 
- 	/*
 
- 	 * tunables, see top of file
 
- 	 */
 
- 	unsigned int cfq_quantum;
 
- 	unsigned int cfq_fifo_expire[2];
 
- 	unsigned int cfq_back_penalty;
 
- 	unsigned int cfq_back_max;
 
- 	unsigned int cfq_slice[2];
 
- 	unsigned int cfq_slice_async_rq;
 
- 	unsigned int cfq_slice_idle;
 
- 	unsigned int cfq_group_idle;
 
- 	unsigned int cfq_latency;
 
- 	unsigned int cfq_target_latency;
 
- 	/*
 
- 	 * Fallback dummy cfqq for extreme OOM conditions
 
- 	 */
 
- 	struct cfq_queue oom_cfqq;
 
- 	unsigned long last_delayed_sync;
 
- };
 
- static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
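
- /*
 
-  * Pick the service tree for a (priority class, workload type) pair; the
 
-  * IDLE class always uses the single service_tree_idle tree.
 
-  */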
 
- static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
 
- 					    enum wl_prio_t prio,
 
- 					    enum wl_type_t type)
 
- {
 
- 	if (!cfqg)
 
- 		return NULL;
 
- 	if (prio == IDLE_WORKLOAD)
 
- 		return &cfqg->service_tree_idle;
 
- 	return &cfqg->service_trees[prio][type];
 
- }
 
- enum cfqq_state_flags {
 
- 	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
 
- 	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
 
- 	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
 
- 	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
 
- 	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
 
- 	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
 
- 	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
 
- 	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
 
- 	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
 
- 	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
 
- 	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be split */
 
- 	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
 
- 	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
 
- };
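
- /*
 
-  * Generate cfq_mark_cfqq_foo(), cfq_clear_cfqq_foo() and cfq_cfqq_foo()
 
-  * helpers that set, clear and test the corresponding flag bit above.
 
-  */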
 
- #define CFQ_CFQQ_FNS(name)						\
 
- static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
 
- {									\
 
- 	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
 
- }									\
 
- static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
 
- {									\
 
- 	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
 
- }									\
 
- static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
 
- {									\
 
- 	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
 
- }
 
- CFQ_CFQQ_FNS(on_rr);
 
- CFQ_CFQQ_FNS(wait_request);
 
- CFQ_CFQQ_FNS(must_dispatch);
 
- CFQ_CFQQ_FNS(must_alloc_slice);
 
- CFQ_CFQQ_FNS(fifo_expire);
 
- CFQ_CFQQ_FNS(idle_window);
 
- CFQ_CFQQ_FNS(prio_changed);
 
- CFQ_CFQQ_FNS(slice_new);
 
- CFQ_CFQQ_FNS(sync);
 
- CFQ_CFQQ_FNS(coop);
 
- CFQ_CFQQ_FNS(split_coop);
 
- CFQ_CFQQ_FNS(deep);
 
- CFQ_CFQQ_FNS(wait_busy);
 
- #undef CFQ_CFQQ_FNS
 
- static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
 
- {
 
- 	return pd ? container_of(pd, struct cfq_group, pd) : NULL;
 
- }
 
- static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
 
- {
 
- 	return pd_to_blkg(&cfqg->pd);
 
- }
 
- #if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
 
- /* cfqg stats flags */
 
- enum cfqg_stats_flags {
 
- 	CFQG_stats_waiting = 0,
 
- 	CFQG_stats_idling,
 
- 	CFQG_stats_empty,
 
- };
 
- #define CFQG_FLAG_FNS(name)						\
 
- static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)	\
 
- {									\
 
- 	stats->flags |= (1 << CFQG_stats_##name);			\
 
- }									\
 
- static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)	\
 
- {									\
 
- 	stats->flags &= ~(1 << CFQG_stats_##name);			\
 
- }									\
 
- static inline int cfqg_stats_##name(struct cfqg_stats *stats)		\
 
- {									\
 
- 	return (stats->flags & (1 << CFQG_stats_##name)) != 0;		\
 
- }									\
 
- CFQG_FLAG_FNS(waiting)
 
- CFQG_FLAG_FNS(idling)
 
- CFQG_FLAG_FNS(empty)
 
- #undef CFQG_FLAG_FNS
 
- /* This should be called with the queue_lock held. */
 
- static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
 
- {
 
- 	unsigned long long now;
 
- 	if (!cfqg_stats_waiting(stats))
 
- 		return;
 
- 	now = sched_clock();
 
- 	if (time_after64(now, stats->start_group_wait_time))
 
- 		blkg_stat_add(&stats->group_wait_time,
 
- 			      now - stats->start_group_wait_time);
 
- 	cfqg_stats_clear_waiting(stats);
 
- }
 
- /* This should be called with the queue_lock held. */
 
- static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
 
- 						 struct cfq_group *curr_cfqg)
 
- {
 
- 	struct cfqg_stats *stats = &cfqg->stats;
 
- 	if (cfqg_stats_waiting(stats))
 
- 		return;
 
- 	if (cfqg == curr_cfqg)
 
- 		return;
 
- 	stats->start_group_wait_time = sched_clock();
 
- 	cfqg_stats_mark_waiting(stats);
 
- }
 
- /* This should be called with the queue_lock held. */
 
- static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
 
- {
 
- 	unsigned long long now;
 
- 	if (!cfqg_stats_empty(stats))
 
- 		return;
 
- 	now = sched_clock();
 
- 	if (time_after64(now, stats->start_empty_time))
 
- 		blkg_stat_add(&stats->empty_time,
 
- 			      now - stats->start_empty_time);
 
- 	cfqg_stats_clear_empty(stats);
 
- }
 
- static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
 
- {
 
- 	blkg_stat_add(&cfqg->stats.dequeue, 1);
 
- }
 
- static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
 
- {
 
- 	struct cfqg_stats *stats = &cfqg->stats;
 
- 	if (blkg_rwstat_sum(&stats->queued))
 
- 		return;
 
- 	/*
 
- 	 * group is already marked empty. This can happen if cfqq got new
 
- 	 * request in parent group and moved to this group while being added
 
- 	 * to service tree. Just ignore the event and move on.
 
- 	 */
 
- 	if (cfqg_stats_empty(stats))
 
- 		return;
 
- 	stats->start_empty_time = sched_clock();
 
- 	cfqg_stats_mark_empty(stats);
 
- }
 
- static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
 
- {
 
- 	struct cfqg_stats *stats = &cfqg->stats;
 
- 	if (cfqg_stats_idling(stats)) {
 
- 		unsigned long long now = sched_clock();
 
- 		if (time_after64(now, stats->start_idle_time))
 
- 			blkg_stat_add(&stats->idle_time,
 
- 				      now - stats->start_idle_time);
 
- 		cfqg_stats_clear_idling(stats);
 
- 	}
 
- }
 
- static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
 
- {
 
- 	struct cfqg_stats *stats = &cfqg->stats;
 
- 	BUG_ON(cfqg_stats_idling(stats));
 
- 	stats->start_idle_time = sched_clock();
 
- 	cfqg_stats_mark_idling(stats);
 
- }
 
- static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
 
- {
 
- 	struct cfqg_stats *stats = &cfqg->stats;
 
- 	blkg_stat_add(&stats->avg_queue_size_sum,
 
- 		      blkg_rwstat_sum(&stats->queued));
 
- 	blkg_stat_add(&stats->avg_queue_size_samples, 1);
 
- 	cfqg_stats_update_group_wait_time(stats);
 
- }
 
- #else	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
 
- static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
 
- static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
 
- static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
 
- static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
 
- static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
 
- static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
 
- static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
 
- #endif	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
 
- #ifdef CONFIG_CFQ_GROUP_IOSCHED
 
- static struct blkcg_policy blkcg_policy_cfq;
 
- static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
 
- {
 
- 	return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
 
- }
 
- static inline void cfqg_get(struct cfq_group *cfqg)
 
- {
 
- 	return blkg_get(cfqg_to_blkg(cfqg));
 
- }
 
- static inline void cfqg_put(struct cfq_group *cfqg)
 
- {
 
- 	return blkg_put(cfqg_to_blkg(cfqg));
 
- }
 
- #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	do {			\
 
- 	char __pbuf[128];						\
 
- 									\
 
- 	blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));	\
 
- 	blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
 
- 			  cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
 
- 			  __pbuf, ##args);				\
 
- } while (0)
 
- #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)	do {			\
 
- 	char __pbuf[128];						\
 
- 									\
 
- 	blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));		\
 
- 	blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);	\
 
- } while (0)
 
- static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
 
- 					    struct cfq_group *curr_cfqg, int rw)
 
- {
 
- 	blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
 
- 	cfqg_stats_end_empty_time(&cfqg->stats);
 
- 	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
 
- }
 
- static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
 
- 			unsigned long time, unsigned long unaccounted_time)
 
- {
 
- 	blkg_stat_add(&cfqg->stats.time, time);
 
- #ifdef CONFIG_DEBUG_BLK_CGROUP
 
- 	blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
 
- #endif
 
- }
 
- static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
 
- {
 
- 	blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
 
- }
 
- static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
 
- {
 
- 	blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
 
- }
 
- static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
 
- 					      uint64_t bytes, int rw)
 
- {
 
- 	blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
 
- 	blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
 
- 	blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
 
- }
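
- /*
 
-  * On completion, charge the device time (now - io_start_time) to
 
-  * service_time and the time spent in the scheduler queue
 
-  * (io_start_time - start_time) to wait_time.
 
-  */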
 
- static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
 
- 			uint64_t start_time, uint64_t io_start_time, int rw)
 
- {
 
- 	struct cfqg_stats *stats = &cfqg->stats;
 
- 	unsigned long long now = sched_clock();
 
- 	if (time_after64(now, io_start_time))
 
- 		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
 
- 	if (time_after64(io_start_time, start_time))
 
- 		blkg_rwstat_add(&stats->wait_time, rw,
 
- 				io_start_time - start_time);
 
- }
 
- static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
 
- {
 
- 	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
 
- 	struct cfqg_stats *stats = &cfqg->stats;
 
- 	/* queued stats shouldn't be cleared */
 
- 	blkg_rwstat_reset(&stats->service_bytes);
 
- 	blkg_rwstat_reset(&stats->serviced);
 
- 	blkg_rwstat_reset(&stats->merged);
 
- 	blkg_rwstat_reset(&stats->service_time);
 
- 	blkg_rwstat_reset(&stats->wait_time);
 
- 	blkg_stat_reset(&stats->time);
 
- #ifdef CONFIG_DEBUG_BLK_CGROUP
 
- 	blkg_stat_reset(&stats->unaccounted_time);
 
- 	blkg_stat_reset(&stats->avg_queue_size_sum);
 
- 	blkg_stat_reset(&stats->avg_queue_size_samples);
 
- 	blkg_stat_reset(&stats->dequeue);
 
- 	blkg_stat_reset(&stats->group_wait_time);
 
- 	blkg_stat_reset(&stats->idle_time);
 
- 	blkg_stat_reset(&stats->empty_time);
 
- #endif
 
- }
 
- #else	/* CONFIG_CFQ_GROUP_IOSCHED */
 
- static inline void cfqg_get(struct cfq_group *cfqg) { }
 
- static inline void cfqg_put(struct cfq_group *cfqg) { }
 
- #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
 
- 	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
 
- #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)
 
- static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
 
- 			struct cfq_group *curr_cfqg, int rw) { }
 
- static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
 
- 			unsigned long time, unsigned long unaccounted_time) { }
 
- static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
 
- static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
 
- static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
 
- 					      uint64_t bytes, int rw) { }
 
- static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
 
- 			uint64_t start_time, uint64_t io_start_time, int rw) { }
 
- #endif	/* CONFIG_CFQ_GROUP_IOSCHED */
 
- #define cfq_log(cfqd, fmt, args...)	\
 
- 	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 
- /* Traverses through cfq group service trees */
 
- #define for_each_cfqg_st(cfqg, i, j, st) \
 
- 	for (i = 0; i <= IDLE_WORKLOAD; i++) \
 
- 		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
 
- 			: &cfqg->service_tree_idle; \
 
- 			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
 
- 			(i == IDLE_WORKLOAD && j == 0); \
 
- 			j++, st = i < IDLE_WORKLOAD ? \
 
- 			&cfqg->service_trees[i][j]: NULL) \
 
- static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
 
- 	struct cfq_ttime *ttime, bool group_idle)
 
- {
 
- 	unsigned long slice;
 
- 	if (!sample_valid(ttime->ttime_samples))
 
- 		return false;
 
- 	if (group_idle)
 
- 		slice = cfqd->cfq_group_idle;
 
- 	else
 
- 		slice = cfqd->cfq_slice_idle;
 
- 	return ttime->ttime_mean > slice;
 
- }
 
- static inline bool iops_mode(struct cfq_data *cfqd)
 
- {
 
- 	/*
 
- 	 * If we are not idling on queues and the drive supports NCQ, requests
 
- 	 * execute in parallel and measuring time is not possible in most
 
- 	 * cases unless we drive shallow queue depths, which then becomes a
 
- 	 * performance bottleneck. In such cases, switch to providing fairness
 
- 	 * in terms of number of IOs.
 
- 	 */
 
- 	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
 
- 		return true;
 
- 	else
 
- 		return false;
 
- }
 
- static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
 
- {
 
- 	if (cfq_class_idle(cfqq))
 
- 		return IDLE_WORKLOAD;
 
- 	if (cfq_class_rt(cfqq))
 
- 		return RT_WORKLOAD;
 
- 	return BE_WORKLOAD;
 
- }
 
- static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
 
- {
 
- 	if (!cfq_cfqq_sync(cfqq))
 
- 		return ASYNC_WORKLOAD;
 
- 	if (!cfq_cfqq_idle_window(cfqq))
 
- 		return SYNC_NOIDLE_WORKLOAD;
 
- 	return SYNC_WORKLOAD;
 
- }
 
- static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
 
- 					struct cfq_data *cfqd,
 
- 					struct cfq_group *cfqg)
 
- {
 
- 	if (wl == IDLE_WORKLOAD)
 
- 		return cfqg->service_tree_idle.count;
 
- 	return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
 
- 		+ cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
 
- 		+ cfqg->service_trees[wl][SYNC_WORKLOAD].count;
 
- }
 
- static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
 
- 					struct cfq_group *cfqg)
 
- {
 
- 	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
 
- 		+ cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
 
- }
 
- static void cfq_dispatch_insert(struct request_queue *, struct request *);
 
- static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
 
- 				       struct cfq_io_cq *cic, struct bio *bio,
 
- 				       gfp_t gfp_mask);
 
- static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
 
- {
 
- 	/* cic->icq is the first member, %NULL will convert to %NULL */
 
- 	return container_of(icq, struct cfq_io_cq, icq);
 
- }
 
- static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
 
- 					       struct io_context *ioc)
 
- {
 
- 	if (ioc)
 
- 		return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
 
- 	return NULL;
 
- }
 
- static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
 
- {
 
- 	return cic->cfqq[is_sync];
 
- }
 
- static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
 
- 				bool is_sync)
 
- {
 
- 	cic->cfqq[is_sync] = cfqq;
 
- }
 
- static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
 
- {
 
- 	return cic->icq.q->elevator->elevator_data;
 
- }
 
- /*
 
-  * We regard a request as SYNC, if it's either a read or has the SYNC bit
 
-  * set (in which case it could also be direct WRITE).
 
-  */
 
- static inline bool cfq_bio_sync(struct bio *bio)
 
- {
 
- 	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
 
- }
 
- /*
 
-  * Schedule a run of the queue if there are requests pending and nothing
 
-  * in the driver will restart queueing.
 
-  */
 
- static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 
- {
 
- 	if (cfqd->busy_queues) {
 
- 		cfq_log(cfqd, "schedule dispatch");
 
- 		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
 
- 	}
 
- }
 
- /*
 
-  * Scale schedule slice based on io priority. Use the sync time slice only
 
-  * if a queue is marked sync and has sync io queued. A sync queue with async
 
-  * io only should not get the full sync slice length.
 
-  */
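
- /*
 
-  * With CFQ_SLICE_SCALE == 5, ioprio 4 gets exactly the base slice,
 
-  * ioprio 0 gets base + 4 * base/5, and ioprio 7 gets base - 3 * base/5.
 
-  */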
 
- static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
 
- 				 unsigned short prio)
 
- {
 
- 	const int base_slice = cfqd->cfq_slice[sync];
 
- 	WARN_ON(prio >= IOPRIO_BE_NR);
 
- 	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
 
- }
 
- static inline int
 
- cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
- {
 
- 	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
 
- }
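
- /*
 
-  * Convert a slice of service time into vdisktime: scale by
 
-  * CFQ_WEIGHT_DEFAULT / cfqg->weight, so heavier groups accumulate
 
-  * vdisktime more slowly and are therefore scheduled sooner.
 
-  */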
 
- static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
 
- {
 
- 	u64 d = delta << CFQ_SERVICE_SHIFT;
 
- 	d = d * CFQ_WEIGHT_DEFAULT;
 
- 	do_div(d, cfqg->weight);
 
- 	return d;
 
- }
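
- /*
 
-  * max_vdisktime()/min_vdisktime() compare via a signed delta so the
 
-  * result stays correct even if the u64 vdisktime values wrap around.
 
-  */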
 
- static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
 
- {
 
- 	s64 delta = (s64)(vdisktime - min_vdisktime);
 
- 	if (delta > 0)
 
- 		min_vdisktime = vdisktime;
 
- 	return min_vdisktime;
 
- }
 
- static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
 
- {
 
- 	s64 delta = (s64)(vdisktime - min_vdisktime);
 
- 	if (delta < 0)
 
- 		min_vdisktime = vdisktime;
 
- 	return min_vdisktime;
 
- }
 
- static void update_min_vdisktime(struct cfq_rb_root *st)
 
- {
 
- 	struct cfq_group *cfqg;
 
- 	if (st->left) {
 
- 		cfqg = rb_entry_cfqg(st->left);
 
- 		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
 
- 						  cfqg->vdisktime);
 
- 	}
 
- }
 
- /*
 
-  * Get the averaged number of queues of RT/BE priority. The average is
 
-  * updated with a formula that gives more weight to higher numbers, so it
 
-  * follows sudden increases quickly and decreases slowly.
 
-  */
 
- static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
 
- 					struct cfq_group *cfqg, bool rt)
 
- {
 
- 	unsigned min_q, max_q;
 
- 	unsigned mult  = cfq_hist_divisor - 1;
 
- 	unsigned round = cfq_hist_divisor / 2;
 
- 	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
 
- 	min_q = min(cfqg->busy_queues_avg[rt], busy);
 
- 	max_q = max(cfqg->busy_queues_avg[rt], busy);
 
- 	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
 
- 		cfq_hist_divisor;
 
- 	return cfqg->busy_queues_avg[rt];
 
- }
 
- static inline unsigned
 
- cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
 
- {
 
- 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
 
- 	return cfqd->cfq_target_latency * cfqg->weight / st->total_weight;
 
- }
 
- static inline unsigned
 
- cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
- {
 
- 	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
 
- 	if (cfqd->cfq_latency) {
 
- 		/*
 
- 		 * interested queues (we consider only the ones with the same
 
- 		 * priority class in the cfq group)
 
- 		 */
 
- 		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
 
- 						cfq_class_rt(cfqq));
 
- 		unsigned sync_slice = cfqd->cfq_slice[1];
 
- 		unsigned expect_latency = sync_slice * iq;
 
- 		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
 
- 		if (expect_latency > group_slice) {
 
- 			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
 
- 			/* scale low_slice according to IO priority
 
- 			 * and sync vs async */
 
- 			unsigned low_slice =
 
- 				min(slice, base_low_slice * slice / sync_slice);
 
- 			/* the adapted slice value is scaled to fit all iqs
 
- 			 * into the target latency */
 
- 			slice = max(slice * group_slice / expect_latency,
 
- 				    low_slice);
 
- 		}
 
- 	}
 
- 	return slice;
 
- }
 
- static inline void
 
- cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
- {
 
- 	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
 
- 	cfqq->slice_start = jiffies;
 
- 	cfqq->slice_end = jiffies + slice;
 
- 	cfqq->allocated_slice = slice;
 
- 	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
 
- }
 
- /*
 
-  * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 
-  * isn't valid until the first request from the dispatch is activated
 
-  * and the slice time set.
 
-  */
 
- static inline bool cfq_slice_used(struct cfq_queue *cfqq)
 
- {
 
- 	if (cfq_cfqq_slice_new(cfqq))
 
- 		return false;
 
- 	if (time_before(jiffies, cfqq->slice_end))
 
- 		return false;
 
- 	return true;
 
- }
 
- /*
 
-  * Lifted from AS - choose which of rq1 and rq2 that is best served now.
 
-  * We choose the request that is closest to the head right now. Distance
 
-  * behind the head is penalized and only allowed to a certain extent.
 
-  */
 
- static struct request *
 
- cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
 
- {
 
- 	sector_t s1, s2, d1 = 0, d2 = 0;
 
- 	unsigned long back_max;
 
- #define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
 
- #define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
 
- 	unsigned wrap = 0; /* bit mask: requests behind the disk head? */
 
- 	if (rq1 == NULL || rq1 == rq2)
 
- 		return rq2;
 
- 	if (rq2 == NULL)
 
- 		return rq1;
 
- 	if (rq_is_sync(rq1) != rq_is_sync(rq2))
 
- 		return rq_is_sync(rq1) ? rq1 : rq2;
 
- 	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
 
- 		return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
 
- 	s1 = blk_rq_pos(rq1);
 
- 	s2 = blk_rq_pos(rq2);
 
- 	/*
 
- 	 * by definition, 1KiB is 2 sectors
 
- 	 */
 
- 	back_max = cfqd->cfq_back_max * 2;
 
- 	/*
 
- 	 * Strict one way elevator _except_ in the case where we allow
 
- 	 * short backward seeks which are biased as twice the cost of a
 
- 	 * similar forward seek.
 
- 	 */
 
- 	if (s1 >= last)
 
- 		d1 = s1 - last;
 
- 	else if (s1 + back_max >= last)
 
- 		d1 = (last - s1) * cfqd->cfq_back_penalty;
 
- 	else
 
- 		wrap |= CFQ_RQ1_WRAP;
 
- 	if (s2 >= last)
 
- 		d2 = s2 - last;
 
- 	else if (s2 + back_max >= last)
 
- 		d2 = (last - s2) * cfqd->cfq_back_penalty;
 
- 	else
 
- 		wrap |= CFQ_RQ2_WRAP;
 
- 	/* Found required data */
 
- 	/*
 
- 	 * By doing switch() on the bit mask "wrap" we avoid having to
 
- 	 * check two variables for all permutations: --> faster!
 
- 	 */
 
- 	switch (wrap) {
 
- 	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
 
- 		if (d1 < d2)
 
- 			return rq1;
 
- 		else if (d2 < d1)
 
- 			return rq2;
 
- 		else {
 
- 			if (s1 >= s2)
 
- 				return rq1;
 
- 			else
 
- 				return rq2;
 
- 		}
 
- 	case CFQ_RQ2_WRAP:
 
- 		return rq1;
 
- 	case CFQ_RQ1_WRAP:
 
- 		return rq2;
 
- 	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
 
- 	default:
 
- 		/*
 
- 		 * Since both rqs are wrapped,
 
- 		 * start with the one that's further behind head
 
- 		 * (--> only *one* back seek required),
 
- 		 * since back seek takes more time than forward.
 
- 		 */
 
- 		if (s1 <= s2)
 
- 			return rq1;
 
- 		else
 
- 			return rq2;
 
- 	}
 
- }
 
- /*
 
-  * The below is leftmost cache rbtree addon
 
-  */
 
- static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
 
- {
 
- 	/* Service tree is empty */
 
- 	if (!root->count)
 
- 		return NULL;
 
- 	if (!root->left)
 
- 		root->left = rb_first(&root->rb);
 
- 	if (root->left)
 
- 		return rb_entry(root->left, struct cfq_queue, rb_node);
 
- 	return NULL;
 
- }
 
- static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
 
- {
 
- 	if (!root->left)
 
- 		root->left = rb_first(&root->rb);
 
- 	if (root->left)
 
- 		return rb_entry_cfqg(root->left);
 
- 	return NULL;
 
- }
 
- static void rb_erase_init(struct rb_node *n, struct rb_root *root)
 
- {
 
- 	rb_erase(n, root);
 
- 	RB_CLEAR_NODE(n);
 
- }
 
- static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
 
- {
 
- 	if (root->left == n)
 
- 		root->left = NULL;
 
- 	rb_erase_init(n, &root->rb);
 
- 	--root->count;
 
- }
 
- /*
 
-  * would be nice to take fifo expire time into account as well
 
-  */
 
- static struct request *
 
- cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
- 		  struct request *last)
 
- {
 
- 	struct rb_node *rbnext = rb_next(&last->rb_node);
 
- 	struct rb_node *rbprev = rb_prev(&last->rb_node);
 
- 	struct request *next = NULL, *prev = NULL;
 
- 	BUG_ON(RB_EMPTY_NODE(&last->rb_node));
 
- 	if (rbprev)
 
- 		prev = rb_entry_rq(rbprev);
 
- 	if (rbnext)
 
- 		next = rb_entry_rq(rbnext);
 
- 	else {
 
- 		rbnext = rb_first(&cfqq->sort_list);
 
- 		if (rbnext && rbnext != &last->rb_node)
 
- 			next = rb_entry_rq(rbnext);
 
- 	}
 
- 	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
 
- }
 
- static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
 
- 				      struct cfq_queue *cfqq)
 
- {
 
- 	/*
 
- 	 * just an approximation, should be ok.
 
- 	 */
 
- 	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
 
- 		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
 
- }
 
- static inline s64
 
- cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
 
- {
 
- 	return cfqg->vdisktime - st->min_vdisktime;
 
- }
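
- /*
 
-  * Plain rbtree insertion keyed by vdisktime relative to min_vdisktime;
 
-  * also refresh the cached leftmost pointer when the new group sorts
 
-  * before every existing one.
 
-  */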
 
- static void
 
- __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 
- {
 
- 	struct rb_node **node = &st->rb.rb_node;
 
- 	struct rb_node *parent = NULL;
 
- 	struct cfq_group *__cfqg;
 
- 	s64 key = cfqg_key(st, cfqg);
 
- 	int left = 1;
 
- 	while (*node != NULL) {
 
- 		parent = *node;
 
- 		__cfqg = rb_entry_cfqg(parent);
 
- 		if (key < cfqg_key(st, __cfqg))
 
- 			node = &parent->rb_left;
 
- 		else {
 
- 			node = &parent->rb_right;
 
- 			left = 0;
 
- 		}
 
- 	}
 
- 	if (left)
 
- 		st->left = &cfqg->rb_node;
 
- 	rb_link_node(&cfqg->rb_node, parent, node);
 
- 	rb_insert_color(&cfqg->rb_node, &st->rb);
 
- }
 
- static void
 
- cfq_update_group_weight(struct cfq_group *cfqg)
 
- {
 
- 	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
 
- 	if (cfqg->new_weight) {
 
- 		cfqg->weight = cfqg->new_weight;
 
- 		cfqg->new_weight = 0;
 
- 	}
 
- }
 
- static void
 
- cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 
- {
 
- 	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
 
- 	cfq_update_group_weight(cfqg);
 
- 	__cfq_group_service_tree_add(st, cfqg);
 
- 	st->total_weight += cfqg->weight;
 
- }
 
- static void
 
- cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 
- {
 
- 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
 
- 	struct cfq_group *__cfqg;
 
- 	struct rb_node *n;
 
- 	cfqg->nr_cfqq++;
 
- 	if (!RB_EMPTY_NODE(&cfqg->rb_node))
 
- 		return;
 
- 	/*
 
- 	 * Currently put the group at the end. Later implement something
 
- 	 * so that groups get lesser vtime based on their weights, so that
 
- 	 * a group does not lose its entire share if it was not continuously backlogged.
 
- 	 */
 
- 	n = rb_last(&st->rb);
 
- 	if (n) {
 
- 		__cfqg = rb_entry_cfqg(n);
 
- 		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
 
- 	} else
 
- 		cfqg->vdisktime = st->min_vdisktime;
 
- 	cfq_group_service_tree_add(st, cfqg);
 
- }
 
- static void
 
- cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
 
- {
 
- 	st->total_weight -= cfqg->weight;
 
- 	if (!RB_EMPTY_NODE(&cfqg->rb_node))
 
- 		cfq_rb_erase(&cfqg->rb_node, st);
 
- }
 
- static void
 
- cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 
- {
 
- 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
 
- 	BUG_ON(cfqg->nr_cfqq < 1);
 
- 	cfqg->nr_cfqq--;
 
- 	/* If there are other cfq queues under this group, don't delete it */
 
- 	if (cfqg->nr_cfqq)
 
- 		return;
 
- 	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
 
- 	cfq_group_service_tree_del(st, cfqg);
 
- 	cfqg->saved_workload_slice = 0;
 
- 	cfqg_stats_update_dequeue(cfqg);
 
- }
 
- static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
 
- 						unsigned int *unaccounted_time)
 
- {
 
- 	unsigned int slice_used;
 
- 	/*
 
- 	 * Queue got expired before even a single request completed or
 
- 	 * got expired immediately after first request completion.
 
- 	 */
 
- 	if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
 
- 		/*
 
- 		 * Also charge the seek time incurred to the group, otherwise
 
- 		 * if there are multiple queues in the group, each can dispatch
 
- 		 * a single request on seeky media and cause lots of seek time
 
- 		 * and group will never know it.
 
- 		 */
 
- 		slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
 
- 					1);
 
- 	} else {
 
- 		slice_used = jiffies - cfqq->slice_start;
 
- 		if (slice_used > cfqq->allocated_slice) {
 
- 			*unaccounted_time = slice_used - cfqq->allocated_slice;
 
- 			slice_used = cfqq->allocated_slice;
 
- 		}
 
- 		if (time_after(cfqq->slice_start, cfqq->dispatch_start))
 
- 			*unaccounted_time += cfqq->slice_start -
 
- 					cfqq->dispatch_start;
 
- 	}
 
- 	return slice_used;
 
- }
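
- /*
 
-  * Charge the group for the slice it just consumed: in iops_mode() the
 
-  * charge is the number of requests dispatched, for an async queue with
 
-  * no sync queues present it is the full allocated slice, otherwise it is
 
-  * the wall-clock slice time. The charge is weight-scaled into vdisktime
 
-  * while the group is temporarily off the service tree.
 
-  */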
 
- static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 
- 				struct cfq_queue *cfqq)
 
- {
 
- 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
 
- 	unsigned int used_sl, charge, unaccounted_sl = 0;
 
- 	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
 
- 			- cfqg->service_tree_idle.count;
 
- 	BUG_ON(nr_sync < 0);
 
- 	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
 
- 	if (iops_mode(cfqd))
 
- 		charge = cfqq->slice_dispatch;
 
- 	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
 
- 		charge = cfqq->allocated_slice;
 
- 	/* Can't update vdisktime while group is on service tree */
 
- 	cfq_group_service_tree_del(st, cfqg);
 
- 	cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
 
- 	/* If a new weight was requested, update now, off tree */
 
- 	cfq_group_service_tree_add(st, cfqg);
 
- 	/* This group is being expired. Save the context */
 
- 	if (time_after(cfqd->workload_expires, jiffies)) {
 
- 		cfqg->saved_workload_slice = cfqd->workload_expires
 
- 						- jiffies;
 
- 		cfqg->saved_workload = cfqd->serving_type;
 
- 		cfqg->saved_serving_prio = cfqd->serving_prio;
 
- 	} else
 
- 		cfqg->saved_workload_slice = 0;
 
- 	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
 
- 					st->min_vdisktime);
 
- 	cfq_log_cfqq(cfqq->cfqd, cfqq,
 
- 		     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
 
- 		     used_sl, cfqq->slice_dispatch, charge,
 
- 		     iops_mode(cfqd), cfqq->nr_sectors);
 
- 	cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
 
- 	cfqg_stats_set_start_empty_time(cfqg);
 
- }
 
- /**
 
-  * cfq_init_cfqg_base - initialize base part of a cfq_group
 
-  * @cfqg: cfq_group to initialize
 
-  *
 
-  * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
 
-  * is enabled or not.
 
-  */
 
- static void cfq_init_cfqg_base(struct cfq_group *cfqg)
 
- {
 
- 	struct cfq_rb_root *st;
 
- 	int i, j;
 
- 	for_each_cfqg_st(cfqg, i, j, st)
 
- 		*st = CFQ_RB_ROOT;
 
- 	RB_CLEAR_NODE(&cfqg->rb_node);
 
- 	cfqg->ttime.last_end_request = jiffies;
 
- }
 
- #ifdef CONFIG_CFQ_GROUP_IOSCHED
 
- static void cfq_pd_init(struct blkcg_gq *blkg)
 
- {
 
- 	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
 
- 	cfq_init_cfqg_base(cfqg);
 
- 	cfqg->weight = blkg->blkcg->cfq_weight;
 
- }
 
- /*
 
-  * Search for the cfq group the current task belongs to. request_queue lock must
 
-  * be held.
 
-  */
 
- static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
 
- 						struct blkcg *blkcg)
 
- {
 
- 	struct request_queue *q = cfqd->queue;
 
- 	struct cfq_group *cfqg = NULL;
 
- 	/* avoid lookup for the common case where there's no blkcg */
 
- 	if (blkcg == &blkcg_root) {
 
- 		cfqg = cfqd->root_group;
 
- 	} else {
 
- 		struct blkcg_gq *blkg;
 
- 		blkg = blkg_lookup_create(blkcg, q);
 
- 		if (!IS_ERR(blkg))
 
- 			cfqg = blkg_to_cfqg(blkg);
 
- 	}
 
- 	return cfqg;
 
- }
 
- static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
 
- {
 
- 	/* Currently, all async queues are mapped to root group */
 
- 	if (!cfq_cfqq_sync(cfqq))
 
- 		cfqg = cfqq->cfqd->root_group;
 
- 	cfqq->cfqg = cfqg;
 
- 	/* cfqq reference on cfqg */
 
- 	cfqg_get(cfqg);
 
- }
 
- static u64 cfqg_prfill_weight_device(struct seq_file *sf,
 
- 				     struct blkg_policy_data *pd, int off)
 
- {
 
- 	struct cfq_group *cfqg = pd_to_cfqg(pd);
 
- 	if (!cfqg->dev_weight)
 
- 		return 0;
 
- 	return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
 
- }
 
- static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
 
- 				    struct seq_file *sf)
 
- {
 
- 	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
 
- 			  cfqg_prfill_weight_device, &blkcg_policy_cfq, 0,
 
- 			  false);
 
- 	return 0;
 
- }
 
- static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
 
- 			    struct seq_file *sf)
 
- {
 
- 	seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight);
 
- 	return 0;
 
- }
 
- static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
 
- 				  const char *buf)
 
- {
 
- 	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 
- 	struct blkg_conf_ctx ctx;
 
- 	struct cfq_group *cfqg;
 
- 	int ret;
 
- 	ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
 
- 	if (ret)
 
- 		return ret;
 
- 	ret = -EINVAL;
 
- 	cfqg = blkg_to_cfqg(ctx.blkg);
 
- 	if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
 
- 		cfqg->dev_weight = ctx.v;
 
- 		cfqg->new_weight = cfqg->dev_weight ?: blkcg->cfq_weight;
 
- 		ret = 0;
 
- 	}
 
- 	blkg_conf_finish(&ctx);
 
- 	return ret;
 
- }
 
- static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
 
- {
 
- 	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 
- 	struct blkcg_gq *blkg;
 
- 	struct hlist_node *n;
 
- 	if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
 
- 		return -EINVAL;
 
- 	spin_lock_irq(&blkcg->lock);
 
- 	blkcg->cfq_weight = (unsigned int)val;
 
- 	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
 
- 		struct cfq_group *cfqg = blkg_to_cfqg(blkg);
 
- 		if (cfqg && !cfqg->dev_weight)
 
- 			cfqg->new_weight = blkcg->cfq_weight;
 
- 	}
 
- 	spin_unlock_irq(&blkcg->lock);
 
- 	return 0;
 
- }
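
Taken together, the two setters give blkio.weight the role of a cgroup-wide default while blkio.weight_device (written as "major:minor weight", or "major:minor 0" to clear) overrides it per device; because cfq_set_weight() skips groups with a dev_weight, a device override survives later writes to blkio.weight. For example, `echo 1000 > blkio.weight` followed by `echo "8:16 300" > blkio.weight_device` leaves the group at weight 1000 everywhere except the 8:16 device (typically /dev/sdb).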
 
- static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
 
- 			   struct seq_file *sf)
 
- {
 
- 	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 
- 	blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
 
- 			  cft->private, false);
 
- 	return 0;
 
- }
 
- static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
 
- 			     struct seq_file *sf)
 
- {
 
- 	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 
- 	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
 
- 			  cft->private, true);
 
- 	return 0;
 
- }
 
- #ifdef CONFIG_DEBUG_BLK_CGROUP
 
- static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
 
- 				      struct blkg_policy_data *pd, int off)
 
- {
 
- 	struct cfq_group *cfqg = pd_to_cfqg(pd);
 
- 	u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
 
- 	u64 v = 0;
 
- 	if (samples) {
 
- 		v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
 
- 		do_div(v, samples);
 
- 	}
 
- 	__blkg_prfill_u64(sf, pd, v);
 
- 	return 0;
 
- }
 
- /* print avg_queue_size */
 
- static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
 
- 				     struct seq_file *sf)
 
- {
 
- 	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 
- 	blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
 
- 			  &blkcg_policy_cfq, 0, false);
 
- 	return 0;
 
- }
 
- #endif	/* CONFIG_DEBUG_BLK_CGROUP */
 
- static struct cftype cfq_blkcg_files[] = {
 
- 	{
 
- 		.name = "weight_device",
 
- 		.read_seq_string = cfqg_print_weight_device,
 
- 		.write_string = cfqg_set_weight_device,
 
- 		.max_write_len = 256,
 
- 	},
 
- 	{
 
- 		.name = "weight",
 
- 		.read_seq_string = cfq_print_weight,
 
- 		.write_u64 = cfq_set_weight,
 
- 	},
 
- 	{
 
- 		.name = "time",
 
- 		.private = offsetof(struct cfq_group, stats.time),
 
- 		.read_seq_string = cfqg_print_stat,
 
- 	},
 
- 	{
 
- 		.name = "sectors",
 
- 		.private = offsetof(struct cfq_group, stats.sectors),
 
- 		.read_seq_string = cfqg_print_stat,
 
- 	},
 
- 	{
 
- 		.name = "io_service_bytes",
 
- 		.private = offsetof(struct cfq_group, stats.service_bytes),
 
- 		.read_seq_string = cfqg_print_rwstat,
 
- 	},
 
- 	{
 
- 		.name = "io_serviced",
 
- 		.private = offsetof(struct cfq_group, stats.serviced),
 
- 		.read_seq_string = cfqg_print_rwstat,
 
- 	},
 
- 	{
 
- 		.name = "io_service_time",
 
- 		.private = offsetof(struct cfq_group, stats.service_time),
 
- 		.read_seq_string = cfqg_print_rwstat,
 
- 	},
 
- 	{
 
- 		.name = "io_wait_time",
 
- 		.private = offsetof(struct cfq_group, stats.wait_time),
 
- 		.read_seq_string = cfqg_print_rwstat,
 
- 	},
 
- 	{
 
- 		.name = "io_merged",
 
- 		.private = offsetof(struct cfq_group, stats.merged),
 
- 		.read_seq_string = cfqg_print_rwstat,
 
- 	},
 
- 	{
 
- 		.name = "io_queued",
 
- 		.private = offsetof(struct cfq_group, stats.queued),
 
- 		.read_seq_string = cfqg_print_rwstat,
 
- 	},
 
- #ifdef CONFIG_DEBUG_BLK_CGROUP
 
- 	{
 
- 		.name = "avg_queue_size",
 
- 		.read_seq_string = cfqg_print_avg_queue_size,
 
- 	},
 
- 	{
 
- 		.name = "group_wait_time",
 
- 		.private = offsetof(struct cfq_group, stats.group_wait_time),
 
- 		.read_seq_string = cfqg_print_stat,
 
- 	},
 
- 	{
 
- 		.name = "idle_time",
 
- 		.private = offsetof(struct cfq_group, stats.idle_time),
 
- 		.read_seq_string = cfqg_print_stat,
 
- 	},
 
- 	{
 
- 		.name = "empty_time",
 
- 		.private = offsetof(struct cfq_group, stats.empty_time),
 
- 		.read_seq_string = cfqg_print_stat,
 
- 	},
 
- 	{
 
- 		.name = "dequeue",
 
- 		.private = offsetof(struct cfq_group, stats.dequeue),
 
- 		.read_seq_string = cfqg_print_stat,
 
- 	},
 
- 	{
 
- 		.name = "unaccounted_time",
 
- 		.private = offsetof(struct cfq_group, stats.unaccounted_time),
 
- 		.read_seq_string = cfqg_print_stat,
 
- 	},
 
- #endif	/* CONFIG_DEBUG_BLK_CGROUP */
 
- 	{ }	/* terminate */
 
- };
 
- #else /* GROUP_IOSCHED */
 
- static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
 
- 						struct blkcg *blkcg)
 
- {
 
- 	return cfqd->root_group;
 
- }
 
- static inline void
 
- cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
 
- 	cfqq->cfqg = cfqg;
 
- }
 
- #endif /* GROUP_IOSCHED */
 
- /*
 
-  * The cfqd->service_trees hold all pending cfq_queue's that have

-  * requests waiting to be processed, sorted in the order in which

-  * we will service the queues.
 
-  */
 
- static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
- 				 bool add_front)
 
- {
 
- 	struct rb_node **p, *parent;
 
- 	struct cfq_queue *__cfqq;
 
- 	unsigned long rb_key;
 
- 	struct cfq_rb_root *service_tree;
 
- 	int left;
 
- 	int new_cfqq = 1;
 
- 	service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
 
- 						cfqq_type(cfqq));
 
- 	if (cfq_class_idle(cfqq)) {
 
- 		rb_key = CFQ_IDLE_DELAY;
 
- 		parent = rb_last(&service_tree->rb);
 
- 		if (parent && parent != &cfqq->rb_node) {
 
- 			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
 
- 			rb_key += __cfqq->rb_key;
 
- 		} else
 
- 			rb_key += jiffies;
 
- 	} else if (!add_front) {
 
- 		/*
 
- 		 * Get our rb key offset. Subtract any residual slice
 
- 		 * value carried from last service. A negative resid
 
- 		 * count indicates slice overrun, and this should position
 
- 		 * the next service time further away in the tree.
 
- 		 */
 
- 		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
 
- 		rb_key -= cfqq->slice_resid;
 
- 		cfqq->slice_resid = 0;
 
- 	} else {
 
- 		rb_key = -HZ;
 
- 		__cfqq = cfq_rb_first(service_tree);
 
- 		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
 
- 	}
 
- 	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
 
- 		new_cfqq = 0;
 
- 		/*
 
- 		 * same position, nothing more to do
 
- 		 */
 
- 		if (rb_key == cfqq->rb_key &&
 
- 		    cfqq->service_tree == service_tree)
 
- 			return;
 
- 		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
 
- 		cfqq->service_tree = NULL;
 
- 	}
 
- 	left = 1;
 
- 	parent = NULL;
 
- 	cfqq->service_tree = service_tree;
 
- 	p = &service_tree->rb.rb_node;
 
- 	while (*p) {
 
- 		struct rb_node **n;
 
- 		parent = *p;
 
- 		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
 
- 		/*
 
- 		 * sort by key, that represents service time.
 
- 		 */
 
- 		if (time_before(rb_key, __cfqq->rb_key))
 
- 			n = &(*p)->rb_left;
 
- 		else {
 
- 			n = &(*p)->rb_right;
 
- 			left = 0;
 
- 		}
 
- 		p = n;
 
- 	}
 
- 	if (left)
 
- 		service_tree->left = &cfqq->rb_node;
 
- 	cfqq->rb_key = rb_key;
 
- 	rb_link_node(&cfqq->rb_node, parent, p);
 
- 	rb_insert_color(&cfqq->rb_node, &service_tree->rb);
 
- 	service_tree->count++;
 
- 	if (add_front || !new_cfqq)
 
- 		return;
 
- 	cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
 
- }
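
A reduced sketch of the three rb_key cases above. The HZ and CFQ_IDLE_DELAY values are assumptions, and the neighbour-relative adjustments (idle keys chain off the rightmost entry, add_front off the leftmost) are flattened into constants for illustration:

	#include <stdio.h>

	#define HZ		1000		/* assumed tick rate */
	#define CFQ_IDLE_DELAY	(HZ / 5)	/* assumed idle-class penalty */

	static long rb_key(int idle, int add_front, long now,
			   long slice_offset, long slice_resid)
	{
		if (idle)
			return now + CFQ_IDLE_DELAY;	/* parked behind everyone */
		if (add_front)
			return now - HZ;		/* sorted ahead of everyone */
		return now + slice_offset - slice_resid; /* resid pulls service earlier */
	}

	int main(void)
	{
		long now = 5000;
		printf("idle=%ld front=%ld normal=%ld\n",
		       rb_key(1, 0, now, 0, 0), rb_key(0, 1, now, 0, 0),
		       rb_key(0, 0, now, 40, 10));
		return 0;
	}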
 
- static struct cfq_queue *
 
- cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
 
- 		     sector_t sector, struct rb_node **ret_parent,
 
- 		     struct rb_node ***rb_link)
 
- {
 
- 	struct rb_node **p, *parent;
 
- 	struct cfq_queue *cfqq = NULL;
 
- 	parent = NULL;
 
- 	p = &root->rb_node;
 
- 	while (*p) {
 
- 		struct rb_node **n;
 
- 		parent = *p;
 
- 		cfqq = rb_entry(parent, struct cfq_queue, p_node);
 
- 		/*
 
- 		 * Sort strictly based on sector.  Smallest to the left,
 
- 		 * largest to the right.
 
- 		 */
 
- 		if (sector > blk_rq_pos(cfqq->next_rq))
 
- 			n = &(*p)->rb_right;
 
- 		else if (sector < blk_rq_pos(cfqq->next_rq))
 
- 			n = &(*p)->rb_left;
 
- 		else
 
- 			break;
 
- 		p = n;
 
- 		cfqq = NULL;
 
- 	}
 
- 	*ret_parent = parent;
 
- 	if (rb_link)
 
- 		*rb_link = p;
 
- 	return cfqq;
 
- }
 
- static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
- {
 
- 	struct rb_node **p, *parent;
 
- 	struct cfq_queue *__cfqq;
 
- 	if (cfqq->p_root) {
 
- 		rb_erase(&cfqq->p_node, cfqq->p_root);
 
- 		cfqq->p_root = NULL;
 
- 	}
 
- 	if (cfq_class_idle(cfqq))
 
- 		return;
 
- 	if (!cfqq->next_rq)
 
- 		return;
 
- 	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
 
- 	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
 
- 				      blk_rq_pos(cfqq->next_rq), &parent, &p);
 
- 	if (!__cfqq) {
 
- 		rb_link_node(&cfqq->p_node, parent, p);
 
- 		rb_insert_color(&cfqq->p_node, cfqq->p_root);
 
- 	} else
 
- 		cfqq->p_root = NULL;
 
- }
 
- /*
 
-  * Update cfqq's position in the service tree.
 
-  */
 
- static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
- {
 
- 	/*
 
- 	 * Resorting requires the cfqq to be on the RR list already.
 
- 	 */
 
- 	if (cfq_cfqq_on_rr(cfqq)) {
 
- 		cfq_service_tree_add(cfqd, cfqq, 0);
 
- 		cfq_prio_tree_add(cfqd, cfqq);
 
- 	}
 
- }
 
- /*
 
-  * add to busy list of queues for service, trying to be fair in ordering
 
-  * the pending list according to last request service
 
-  */
 
- static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
- {
 
- 	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
 
- 	BUG_ON(cfq_cfqq_on_rr(cfqq));
 
- 	cfq_mark_cfqq_on_rr(cfqq);
 
- 	cfqd->busy_queues++;
 
- 	if (cfq_cfqq_sync(cfqq))
 
- 		cfqd->busy_sync_queues++;
 
- 	cfq_resort_rr_list(cfqd, cfqq);
 
- }
 
- /*
 
-  * Called when the cfqq no longer has requests pending, remove it from
 
-  * the service tree.
 
-  */
 
- static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
- {
 
- 	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
 
- 	BUG_ON(!cfq_cfqq_on_rr(cfqq));
 
- 	cfq_clear_cfqq_on_rr(cfqq);
 
- 	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
 
- 		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
 
- 		cfqq->service_tree = NULL;
 
- 	}
 
- 	if (cfqq->p_root) {
 
- 		rb_erase(&cfqq->p_node, cfqq->p_root);
 
- 		cfqq->p_root = NULL;
 
- 	}
 
- 	cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
 
- 	BUG_ON(!cfqd->busy_queues);
 
- 	cfqd->busy_queues--;
 
- 	if (cfq_cfqq_sync(cfqq))
 
- 		cfqd->busy_sync_queues--;
 
- }
 
- /*
 
-  * rb tree support functions
 
-  */
 
- static void cfq_del_rq_rb(struct request *rq)
 
- {
 
- 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 
- 	const int sync = rq_is_sync(rq);
 
- 	BUG_ON(!cfqq->queued[sync]);
 
- 	cfqq->queued[sync]--;
 
- 	elv_rb_del(&cfqq->sort_list, rq);
 
- 	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
 
- 		/*
 
- 		 * Queue will be deleted from service tree when we actually
 
- 		 * expire it later. Right now just remove it from prio tree
 
- 		 * as it is empty.
 
- 		 */
 
- 		if (cfqq->p_root) {
 
- 			rb_erase(&cfqq->p_node, cfqq->p_root);
 
- 			cfqq->p_root = NULL;
 
- 		}
 
- 	}
 
- }
 
- static void cfq_add_rq_rb(struct request *rq)
 
- {
 
- 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 
- 	struct cfq_data *cfqd = cfqq->cfqd;
 
- 	struct request *prev;
 
- 	cfqq->queued[rq_is_sync(rq)]++;
 
- 	elv_rb_add(&cfqq->sort_list, rq);
 
- 	if (!cfq_cfqq_on_rr(cfqq))
 
- 		cfq_add_cfqq_rr(cfqd, cfqq);
 
- 	/*
 
- 	 * check if this request is a better next-serve candidate
 
- 	 */
 
- 	prev = cfqq->next_rq;
 
- 	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
 
- 	/*
 
- 	 * adjust priority tree position, if ->next_rq changes
 
- 	 */
 
- 	if (prev != cfqq->next_rq)
 
- 		cfq_prio_tree_add(cfqd, cfqq);
 
- 	BUG_ON(!cfqq->next_rq);
 
- }
 
- static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
 
- {
 
- 	elv_rb_del(&cfqq->sort_list, rq);
 
- 	cfqq->queued[rq_is_sync(rq)]--;
 
- 	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
 
- 	cfq_add_rq_rb(rq);
 
- 	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
 
- 				 rq->cmd_flags);
 
- }
 
- static struct request *
 
- cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 
- {
 
- 	struct task_struct *tsk = current;
 
- 	struct cfq_io_cq *cic;
 
- 	struct cfq_queue *cfqq;
 
- 	cic = cfq_cic_lookup(cfqd, tsk->io_context);
 
- 	if (!cic)
 
- 		return NULL;
 
- 	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
 
- 	if (cfqq) {
 
- 		sector_t sector = bio->bi_sector + bio_sectors(bio);
 
- 		return elv_rb_find(&cfqq->sort_list, sector);
 
- 	}
 
- 	return NULL;
 
- }
 
- static void cfq_activate_request(struct request_queue *q, struct request *rq)
 
- {
 
- 	struct cfq_data *cfqd = q->elevator->elevator_data;
 
- 	cfqd->rq_in_driver++;
 
- 	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
 
- 						cfqd->rq_in_driver);
 
- 	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
- }
 
- static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
 
- {
 
- 	struct cfq_data *cfqd = q->elevator->elevator_data;
 
- 	WARN_ON(!cfqd->rq_in_driver);
 
- 	cfqd->rq_in_driver--;
 
- 	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
 
- 						cfqd->rq_in_driver);
 
- }
 
- static void cfq_remove_request(struct request *rq)
 
- {
 
- 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 
- 	if (cfqq->next_rq == rq)
 
- 		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
 
- 	list_del_init(&rq->queuelist);
 
- 	cfq_del_rq_rb(rq);
 
- 	cfqq->cfqd->rq_queued--;
 
- 	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
 
- 	if (rq->cmd_flags & REQ_PRIO) {
 
- 		WARN_ON(!cfqq->prio_pending);
 
- 		cfqq->prio_pending--;
 
- 	}
 
- }
 
- static int cfq_merge(struct request_queue *q, struct request **req,
 
- 		     struct bio *bio)
 
- {
 
- 	struct cfq_data *cfqd = q->elevator->elevator_data;
 
- 	struct request *__rq;
 
- 	__rq = cfq_find_rq_fmerge(cfqd, bio);
 
- 	if (__rq && elv_rq_merge_ok(__rq, bio)) {
 
- 		*req = __rq;
 
- 		return ELEVATOR_FRONT_MERGE;
 
- 	}
 
- 	return ELEVATOR_NO_MERGE;
 
- }
 
- static void cfq_merged_request(struct request_queue *q, struct request *req,
 
- 			       int type)
 
- {
 
- 	if (type == ELEVATOR_FRONT_MERGE) {
 
- 		struct cfq_queue *cfqq = RQ_CFQQ(req);
 
- 		cfq_reposition_rq_rb(cfqq, req);
 
- 	}
 
- }
 
- static void cfq_bio_merged(struct request_queue *q, struct request *req,
 
- 				struct bio *bio)
 
- {
 
- 	cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
 
- }
 
- static void
 
- cfq_merged_requests(struct request_queue *q, struct request *rq,
 
- 		    struct request *next)
 
- {
 
- 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 
- 	struct cfq_data *cfqd = q->elevator->elevator_data;
 
- 	/*
 
- 	 * reposition in fifo if next is older than rq
 
- 	 */
 
- 	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
 
- 	    time_before(rq_fifo_time(next), rq_fifo_time(rq)) &&
 
- 	    cfqq == RQ_CFQQ(next)) {
 
- 		list_move(&rq->queuelist, &next->queuelist);
 
- 		rq_set_fifo_time(rq, rq_fifo_time(next));
 
- 	}
 
- 	if (cfqq->next_rq == next)
 
- 		cfqq->next_rq = rq;
 
- 	cfq_remove_request(next);
 
- 	cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
 
- 	cfqq = RQ_CFQQ(next);
 
- 	/*
 
- 	 * all requests of this queue are merged to other queues, delete it
 
- 	 * from the service tree. If it's the active_queue,
 
- 	 * cfq_dispatch_requests() will choose to expire it or do idle
 
- 	 */
 
- 	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
 
- 	    cfqq != cfqd->active_queue)
 
- 		cfq_del_cfqq_rr(cfqd, cfqq);
 
- }
 
- static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 
- 			   struct bio *bio)
 
- {
 
- 	struct cfq_data *cfqd = q->elevator->elevator_data;
 
- 	struct cfq_io_cq *cic;
 
- 	struct cfq_queue *cfqq;
 
- 	/*
 
- 	 * Disallow merge of a sync bio into an async request.
 
- 	 */
 
- 	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
 
- 		return false;
 
- 	/*
 
- 	 * Lookup the cfqq that this bio will be queued with and allow
 
- 	 * merge only if rq is queued there.
 
- 	 */
 
- 	cic = cfq_cic_lookup(cfqd, current->io_context);
 
- 	if (!cic)
 
- 		return false;
 
- 	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
 
- 	return cfqq == RQ_CFQQ(rq);
 
- }
 
- static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
- {
 
- 	del_timer(&cfqd->idle_slice_timer);
 
- 	cfqg_stats_update_idle_time(cfqq->cfqg);
 
- }
 
- static void __cfq_set_active_queue(struct cfq_data *cfqd,
 
- 				   struct cfq_queue *cfqq)
 
- {
 
- 	if (cfqq) {
 
- 		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
 
- 				cfqd->serving_prio, cfqd->serving_type);
 
- 		cfqg_stats_update_avg_queue_size(cfqq->cfqg);
 
- 		cfqq->slice_start = 0;
 
- 		cfqq->dispatch_start = jiffies;
 
- 		cfqq->allocated_slice = 0;
 
- 		cfqq->slice_end = 0;
 
- 		cfqq->slice_dispatch = 0;
 
- 		cfqq->nr_sectors = 0;
 
- 		cfq_clear_cfqq_wait_request(cfqq);
 
- 		cfq_clear_cfqq_must_dispatch(cfqq);
 
- 		cfq_clear_cfqq_must_alloc_slice(cfqq);
 
- 		cfq_clear_cfqq_fifo_expire(cfqq);
 
- 		cfq_mark_cfqq_slice_new(cfqq);
 
- 		cfq_del_timer(cfqd, cfqq);
 
- 	}
 
- 	cfqd->active_queue = cfqq;
 
- }
 
- /*
 
-  * current cfqq expired its slice (or was too idle), select new one
 
-  */
 
- static void
 
- __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
- 		    bool timed_out)
 
- {
 
- 	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
 
- 	if (cfq_cfqq_wait_request(cfqq))
 
- 		cfq_del_timer(cfqd, cfqq);
 
- 	cfq_clear_cfqq_wait_request(cfqq);
 
- 	cfq_clear_cfqq_wait_busy(cfqq);
 
- 	/*
 
- 	 * If this cfqq is shared between multiple processes, check to
 
- 	 * make sure that those processes are still issuing I/Os within
 
- 	 * the mean seek distance.  If not, it may be time to break the
 
- 	 * queues apart again.
 
- 	 */
 
- 	if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
 
- 		cfq_mark_cfqq_split_coop(cfqq);
 
- 	/*
 
- 	 * store what was left of this slice, if the queue idled/timed out
 
- 	 */
 
- 	if (timed_out) {
 
- 		if (cfq_cfqq_slice_new(cfqq))
 
- 			cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
 
- 		else
 
- 			cfqq->slice_resid = cfqq->slice_end - jiffies;
 
- 		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
 
- 	}
 
- 	cfq_group_served(cfqd, cfqq->cfqg, cfqq);
 
- 	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
 
- 		cfq_del_cfqq_rr(cfqd, cfqq);
 
- 	cfq_resort_rr_list(cfqd, cfqq);
 
- 	if (cfqq == cfqd->active_queue)
 
- 		cfqd->active_queue = NULL;
 
- 	if (cfqd->active_cic) {
 
- 		put_io_context(cfqd->active_cic->icq.ioc);
 
- 		cfqd->active_cic = NULL;
 
- 	}
 
- }
 
- static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
 
- {
 
- 	struct cfq_queue *cfqq = cfqd->active_queue;
 
- 	if (cfqq)
 
- 		__cfq_slice_expired(cfqd, cfqq, timed_out);
 
- }
 
- /*
 
-  * Get next queue for service. Unless we have a queue preemption,
 
-  * we'll simply select the first cfqq in the service tree.
 
-  */
 
- static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 
- {
 
- 	struct cfq_rb_root *service_tree =
 
- 		service_tree_for(cfqd->serving_group, cfqd->serving_prio,
 
- 					cfqd->serving_type);
 
- 	if (!cfqd->rq_queued)
 
- 		return NULL;
 
- 	/* There is nothing to dispatch */
 
- 	if (!service_tree)
 
- 		return NULL;
 
- 	if (RB_EMPTY_ROOT(&service_tree->rb))
 
- 		return NULL;
 
- 	return cfq_rb_first(service_tree);
 
- }
 
- static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
 
- {
 
- 	struct cfq_group *cfqg;
 
- 	struct cfq_queue *cfqq;
 
- 	int i, j;
 
- 	struct cfq_rb_root *st;
 
- 	if (!cfqd->rq_queued)
 
- 		return NULL;
 
- 	cfqg = cfq_get_next_cfqg(cfqd);
 
- 	if (!cfqg)
 
- 		return NULL;
 
- 	for_each_cfqg_st(cfqg, i, j, st)
 
- 		if ((cfqq = cfq_rb_first(st)) != NULL)
 
- 			return cfqq;
 
- 	return NULL;
 
- }
 
- /*
 
-  * Get and set a new active queue for service.
 
-  */
 
- static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
 
- 					      struct cfq_queue *cfqq)
 
- {
 
- 	if (!cfqq)
 
- 		cfqq = cfq_get_next_queue(cfqd);
 
- 	__cfq_set_active_queue(cfqd, cfqq);
 
- 	return cfqq;
 
- }
 
- static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
 
- 					  struct request *rq)
 
- {
 
- 	if (blk_rq_pos(rq) >= cfqd->last_position)
 
- 		return blk_rq_pos(rq) - cfqd->last_position;
 
- 	else
 
- 		return cfqd->last_position - blk_rq_pos(rq);
 
- }
 
- static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
- 			       struct request *rq)
 
- {
 
- 	return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
 
- }
 
- static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
 
- 				    struct cfq_queue *cur_cfqq)
 
- {
 
- 	struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
 
- 	struct rb_node *parent, *node;
 
- 	struct cfq_queue *__cfqq;
 
- 	sector_t sector = cfqd->last_position;
 
- 	if (RB_EMPTY_ROOT(root))
 
- 		return NULL;
 
- 	/*
 
- 	 * First, if we find a request starting at the end of the last
 
- 	 * request, choose it.
 
- 	 */
 
- 	__cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
 
- 	if (__cfqq)
 
- 		return __cfqq;
 
- 	/*
 
- 	 * If the exact sector wasn't found, the parent of the NULL leaf
 
- 	 * will contain the closest sector.
 
- 	 */
 
- 	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
 
- 	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
 
- 		return __cfqq;
 
- 	if (blk_rq_pos(__cfqq->next_rq) < sector)
 
- 		node = rb_next(&__cfqq->p_node);
 
- 	else
 
- 		node = rb_prev(&__cfqq->p_node);
 
- 	if (!node)
 
- 		return NULL;
 
- 	__cfqq = rb_entry(node, struct cfq_queue, p_node);
 
- 	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
 
- 		return __cfqq;
 
- 	return NULL;
 
- }
 
- /*
 
-  * cfqd - obvious
 
-  * cur_cfqq - passed in so that we don't decide that the current queue is
 
-  * 	      closely cooperating with itself.
 
-  *
 
-  * So, basically we're assuming that cur_cfqq has dispatched at least
 
-  * one request, and that cfqd->last_position reflects a position on the disk
 
-  * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
 
-  * assumption.
 
-  */
 
- static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
 
- 					      struct cfq_queue *cur_cfqq)
 
- {
 
- 	struct cfq_queue *cfqq;
 
- 	if (cfq_class_idle(cur_cfqq))
 
- 		return NULL;
 
- 	if (!cfq_cfqq_sync(cur_cfqq))
 
- 		return NULL;
 
- 	if (CFQQ_SEEKY(cur_cfqq))
 
- 		return NULL;
 
- 	/*
 
- 	 * Don't search priority tree if it's the only queue in the group.
 
- 	 */
 
- 	if (cur_cfqq->cfqg->nr_cfqq == 1)
 
- 		return NULL;
 
- 	/*
 
- 	 * We should notice if some of the queues are cooperating, eg
 
- 	 * working closely on the same area of the disk. In that case,
 
- 	 * we can group them together and not waste time idling.
 
- 	 */
 
- 	cfqq = cfqq_close(cfqd, cur_cfqq);
 
- 	if (!cfqq)
 
- 		return NULL;
 
- 	/* If new queue belongs to different cfq_group, don't choose it */
 
- 	if (cur_cfqq->cfqg != cfqq->cfqg)
 
- 		return NULL;
 
- 	/*
 
- 	 * It only makes sense to merge sync queues.
 
- 	 */
 
- 	if (!cfq_cfqq_sync(cfqq))
 
- 		return NULL;
 
- 	if (CFQQ_SEEKY(cfqq))
 
- 		return NULL;
 
- 	/*
 
- 	 * Do not merge queues of different priority classes
 
- 	 */
 
- 	if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
 
- 		return NULL;
 
- 	return cfqq;
 
- }
 
- /*
 
-  * Determine whether we should enforce idle window for this queue.
 
-  */
 
- static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
- {
 
- 	enum wl_prio_t prio = cfqq_prio(cfqq);
 
- 	struct cfq_rb_root *service_tree = cfqq->service_tree;
 
- 	BUG_ON(!service_tree);
 
- 	BUG_ON(!service_tree->count);
 
- 	if (!cfqd->cfq_slice_idle)
 
- 		return false;
 
- 	/* We never do for idle class queues. */
 
- 	if (prio == IDLE_WORKLOAD)
 
- 		return false;
 
- 	/* We do for queues that were marked with idle window flag. */
 
- 	if (cfq_cfqq_idle_window(cfqq) &&
 
- 	   !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
 
- 		return true;
 
- 	/*
 
- 	 * Otherwise, we do only if they are the last ones
 
- 	 * in their service tree.
 
- 	 */
 
- 	if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
 
- 	   !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
 
- 		return true;
 
- 	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
 
- 			service_tree->count);
 
- 	return false;
 
- }
 
- static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 
- {
 
- 	struct cfq_queue *cfqq = cfqd->active_queue;
 
- 	struct cfq_io_cq *cic;
 
- 	unsigned long sl, group_idle = 0;
 
- 	/*
 
- 	 * SSD device without seek penalty, disable idling. But only do so
 
- 	 * for devices that support queuing, otherwise we still have a problem
 
- 	 * with sync vs async workloads.
 
- 	 */
 
- 	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
 
- 		return;
 
- 	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
 
- 	WARN_ON(cfq_cfqq_slice_new(cfqq));
 
- 	/*
 
- 	 * idle is disabled, either manually or by past process history
 
- 	 */
 
- 	if (!cfq_should_idle(cfqd, cfqq)) {
 
- 		/* no queue idling. Check for group idling */
 
- 		if (cfqd->cfq_group_idle)
 
- 			group_idle = cfqd->cfq_group_idle;
 
- 		else
 
- 			return;
 
- 	}
 
- 	/*
 
- 	 * still active requests from this queue, don't idle
 
- 	 */
 
- 	if (cfqq->dispatched)
 
- 		return;
 
- 	/*
 
- 	 * task has exited, don't wait
 
- 	 */
 
- 	cic = cfqd->active_cic;
 
- 	if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
 
- 		return;
 
- 	/*
 
- 	 * If our average think time is larger than the remaining time
 
- 	 * slice, then don't idle. This avoids overrunning the allotted
 
- 	 * time slice.
 
- 	 */
 
- 	if (sample_valid(cic->ttime.ttime_samples) &&
 
- 	    (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
 
- 		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
 
- 			     cic->ttime.ttime_mean);
 
- 		return;
 
- 	}
 
- 	/* There are other queues in the group, don't do group idle */
 
- 	if (group_idle && cfqq->cfqg->nr_cfqq > 1)
 
- 		return;
 
- 	cfq_mark_cfqq_wait_request(cfqq);
 
- 	if (group_idle)
 
- 		sl = cfqd->cfq_group_idle;
 
- 	else
 
- 		sl = cfqd->cfq_slice_idle;
 
- 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
 
- 	cfqg_stats_set_start_idle_time(cfqq->cfqg);
 
- 	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
 
- 			group_idle ? 1 : 0);
 
- }
 
- /*
 
-  * Move request from internal lists to the request queue dispatch list.
 
-  */
 
- static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 
- {
 
- 	struct cfq_data *cfqd = q->elevator->elevator_data;
 
- 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 
- 	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
 
- 	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
 
- 	cfq_remove_request(rq);
 
- 	cfqq->dispatched++;
 
- 	(RQ_CFQG(rq))->dispatched++;
 
- 	elv_dispatch_sort(q, rq);
 
- 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
 
- 	cfqq->nr_sectors += blk_rq_sectors(rq);
 
- 	cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
 
- }
 
- /*
 
-  * return expired entry, or NULL to just start from scratch in rbtree
 
-  */
 
- static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
 
- {
 
- 	struct request *rq = NULL;
 
- 	if (cfq_cfqq_fifo_expire(cfqq))
 
- 		return NULL;
 
- 	cfq_mark_cfqq_fifo_expire(cfqq);
 
- 	if (list_empty(&cfqq->fifo))
 
- 		return NULL;
 
- 	rq = rq_entry_fifo(cfqq->fifo.next);
 
- 	if (time_before(jiffies, rq_fifo_time(rq)))
 
- 		rq = NULL;
 
- 	cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
 
- 	return rq;
 
- }
 
- static inline int
 
- cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
- {
 
- 	const int base_rq = cfqd->cfq_slice_async_rq;
 
- 	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
 
- 	return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
 
- }
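
As a worked example of the depth formula above: with cfq_slice_async_rq = 2 (believed to be the shipped default), an ioprio-0 best-effort queue may hold up to 2 * 2 * (8 - 0) = 32 requests per slice, tapering to 2 * 2 * (8 - 7) = 4 at the lowest best-effort priority.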
 
- /*
 
-  * Must be called with the queue_lock held.
 
-  */
 
- static int cfqq_process_refs(struct cfq_queue *cfqq)
 
- {
 
- 	int process_refs, io_refs;
 
- 	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
 
- 	process_refs = cfqq->ref - io_refs;
 
- 	BUG_ON(process_refs < 0);
 
- 	return process_refs;
 
- }
 
- static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
 
- {
 
- 	int process_refs, new_process_refs;
 
- 	struct cfq_queue *__cfqq;
 
- 	/*
 
- 	 * If there are no process references on the new_cfqq, then it is
 
- 	 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
 
- 	 * chain may have dropped their last reference (not just their
 
- 	 * last process reference).
 
- 	 */
 
- 	if (!cfqq_process_refs(new_cfqq))
 
- 		return;
 
- 	/* Avoid a circular list and skip interim queue merges */
 
- 	while ((__cfqq = new_cfqq->new_cfqq)) {
 
- 		if (__cfqq == cfqq)
 
- 			return;
 
- 		new_cfqq = __cfqq;
 
- 	}
 
- 	process_refs = cfqq_process_refs(cfqq);
 
- 	new_process_refs = cfqq_process_refs(new_cfqq);
 
- 	/*
 
- 	 * If the process for the cfqq has gone away, there is no
 
- 	 * sense in merging the queues.
 
- 	 */
 
- 	if (process_refs == 0 || new_process_refs == 0)
 
- 		return;
 
- 	/*
 
- 	 * Merge in the direction of the lesser amount of work.
 
- 	 */
 
- 	if (new_process_refs >= process_refs) {
 
- 		cfqq->new_cfqq = new_cfqq;
 
- 		new_cfqq->ref += process_refs;
 
- 	} else {
 
- 		new_cfqq->new_cfqq = cfqq;
 
- 		cfqq->ref += new_process_refs;
 
- 	}
 
- }
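
For example, if cfqq has 3 process references and new_cfqq has 1, the else branch above sets new_cfqq->new_cfqq = cfqq and adds the single reference to cfqq->ref: the queue with fewer process references is drained into the busier one, which is what "merge in the direction of the lesser amount of work" means in practice.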
 
- static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
 
- 				struct cfq_group *cfqg, enum wl_prio_t prio)
 
- {
 
- 	struct cfq_queue *queue;
 
- 	int i;
 
- 	bool key_valid = false;
 
- 	unsigned long lowest_key = 0;
 
- 	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
 
- 	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
 
- 		/* select the one with lowest rb_key */
 
- 		queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
 
- 		if (queue &&
 
- 		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
 
- 			lowest_key = queue->rb_key;
 
- 			cur_best = i;
 
- 			key_valid = true;
 
- 		}
 
- 	}
 
- 	return cur_best;
 
- }
 
- static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 
- {
 
- 	unsigned slice;
 
- 	unsigned count;
 
- 	struct cfq_rb_root *st;
 
- 	unsigned group_slice;
 
- 	enum wl_prio_t original_prio = cfqd->serving_prio;
 
- 	/* Choose next priority. RT > BE > IDLE */
 
- 	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
 
- 		cfqd->serving_prio = RT_WORKLOAD;
 
- 	else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
 
- 		cfqd->serving_prio = BE_WORKLOAD;
 
- 	else {
 
- 		cfqd->serving_prio = IDLE_WORKLOAD;
 
- 		cfqd->workload_expires = jiffies + 1;
 
- 		return;
 
- 	}
 
- 	if (original_prio != cfqd->serving_prio)
 
- 		goto new_workload;
 
- 	/*
 
- 	 * For RT and BE, we have to choose also the type
 
- 	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
 
- 	 * expiration time
 
- 	 */
 
- 	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
 
- 	count = st->count;
 
- 	/*
 
- 	 * check workload expiration, and that we still have other queues ready
 
- 	 */
 
- 	if (count && !time_after(jiffies, cfqd->workload_expires))
 
- 		return;
 
- new_workload:
 
- 	/* otherwise select new workload type */
 
- 	cfqd->serving_type =
 
- 		cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
 
- 	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
 
- 	count = st->count;
 
- 	/*
 
- 	 * the workload slice is computed as a fraction of target latency
 
- 	 * proportional to the number of queues in that workload, over
 
- 	 * all the queues in the same priority class
 
- 	 */
 
- 	group_slice = cfq_group_slice(cfqd, cfqg);
 
- 	slice = group_slice * count /
 
- 		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
 
- 		      cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
 
- 	if (cfqd->serving_type == ASYNC_WORKLOAD) {
 
- 		unsigned int tmp;
 
- 		/*
 
- 		 * Async queues are currently system wide. Just taking the

- 		 * proportion of queues within the same group would lead to a

- 		 * higher async ratio system wide, as the root group generally

- 		 * has a higher weight. A more accurate approach would be to

- 		 * calculate the system-wide async/sync ratio.
 
- 		 */
 
- 		tmp = cfqd->cfq_target_latency *
 
- 			cfqg_busy_async_queues(cfqd, cfqg);
 
- 		tmp = tmp/cfqd->busy_queues;
 
- 		slice = min_t(unsigned, slice, tmp);
 
- 		/* async workload slice is scaled down according to
 
- 		 * the sync/async slice ratio. */
 
- 		slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
 
- 	} else
 
- 		/* sync workload slice is at least 2 * cfq_slice_idle */
 
- 		slice = max(slice, 2 * cfqd->cfq_slice_idle);
 
- 	slice = max_t(unsigned, slice, CFQ_MIN_TT);
 
- 	cfq_log(cfqd, "workload slice:%d", slice);
 
- 	cfqd->workload_expires = jiffies + slice;
 
- }
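
A sketch of the slice computation with hypothetical numbers: a 300 ms group slice, 2 queues on the chosen service tree out of 4 busy in the priority class, and an assumed 8 ms slice_idle:

	#include <stdio.h>

	int main(void)
	{
		unsigned group_slice = 300, count = 2, busy_in_class = 4;
		unsigned slice_idle = 8, min_tt = 2;	/* assumed defaults */
		unsigned slice = group_slice * count / busy_in_class;	/* 150 */

		if (slice < 2 * slice_idle)	/* sync floor from the code above */
			slice = 2 * slice_idle;
		if (slice < min_tt)		/* CFQ_MIN_TT floor */
			slice = min_tt;
		printf("workload slice:%u\n", slice);
		return 0;
	}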
 
- static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
 
- {
 
- 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
 
- 	struct cfq_group *cfqg;
 
- 	if (RB_EMPTY_ROOT(&st->rb))
 
- 		return NULL;
 
- 	cfqg = cfq_rb_first_group(st);
 
- 	update_min_vdisktime(st);
 
- 	return cfqg;
 
- }
 
- static void cfq_choose_cfqg(struct cfq_data *cfqd)
 
- {
 
- 	struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
 
- 	cfqd->serving_group = cfqg;
 
- 	/* Restore the workload type data */
 
- 	if (cfqg->saved_workload_slice) {
 
- 		cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
 
- 		cfqd->serving_type = cfqg->saved_workload;
 
- 		cfqd->serving_prio = cfqg->saved_serving_prio;
 
- 	} else
 
- 		cfqd->workload_expires = jiffies - 1;
 
- 	choose_service_tree(cfqd, cfqg);
 
- }
 
- /*
 
-  * Select a queue for service. If we have a current active queue,
 
-  * check whether to continue servicing it, or retrieve and set a new one.
 
-  */
 
- static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 
- {
 
- 	struct cfq_queue *cfqq, *new_cfqq = NULL;
 
- 	cfqq = cfqd->active_queue;
 
- 	if (!cfqq)
 
- 		goto new_queue;
 
- 	if (!cfqd->rq_queued)
 
- 		return NULL;
 
- 	/*
 
- 	 * We were waiting for group to get backlogged. Expire the queue
 
- 	 */
 
- 	if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
 
- 		goto expire;
 
- 	/*
 
- 	 * The active queue has run out of time, expire it and select new.
 
- 	 */
 
- 	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
 
- 		/*
 
- 		 * If slice had not expired at the completion of last request
 
- 		 * we might not have turned on wait_busy flag. Don't expire
 
- 		 * the queue yet. Allow the group to get backlogged.
 
- 		 *
 
- 		 * The very fact that we have used the slice means we
 
- 		 * have been idling all along on this queue and it should be
 
- 		 * ok to wait for this request to complete.
 
- 		 */
 
- 		if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
 
- 		    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
 
- 			cfqq = NULL;
 
- 			goto keep_queue;
 
- 		} else
 
- 			goto check_group_idle;
 
- 	}
 
- 	/*
 
- 	 * The active queue has requests and isn't expired, allow it to
 
- 	 * dispatch.
 
- 	 */
 
- 	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
 
- 		goto keep_queue;
 
- 	/*
 
- 	 * If another queue has a request waiting within our mean seek
 
- 	 * distance, let it run.  The expire code will check for close
 
- 	 * cooperators and put the close queue at the front of the service
 
- 	 * tree.  If possible, merge the expiring queue with the new cfqq.
 
- 	 */
 
- 	new_cfqq = cfq_close_cooperator(cfqd, cfqq);
 
- 	if (new_cfqq) {
 
- 		if (!cfqq->new_cfqq)
 
- 			cfq_setup_merge(cfqq, new_cfqq);
 
- 		goto expire;
 
- 	}
 
- 	/*
 
- 	 * No requests pending. If the active queue still has requests in
 
- 	 * flight or is idling for a new request, allow either of these
 
- 	 * conditions to happen (or time out) before selecting a new queue.
 
- 	 */
 
- 	if (timer_pending(&cfqd->idle_slice_timer)) {
 
- 		cfqq = NULL;
 
- 		goto keep_queue;
 
- 	}
 
- 	/*
 
- 	 * This is a deep seek queue, but the device is much faster than

- 	 * the queue can keep it busy; don't idle.

- 	 */
 
- 	if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
 
- 	    (cfq_cfqq_slice_new(cfqq) ||
 
- 	    (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
 
- 		cfq_clear_cfqq_deep(cfqq);
 
- 		cfq_clear_cfqq_idle_window(cfqq);
 
- 	}
 
- 	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
 
- 		cfqq = NULL;
 
- 		goto keep_queue;
 
- 	}
 
- 	/*
 
- 	 * If group idle is enabled and there are requests dispatched from
 
- 	 * this group, wait for requests to complete.
 
- 	 */
 
- check_group_idle:
 
- 	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
 
- 	    cfqq->cfqg->dispatched &&
 
- 	    !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
 
- 		cfqq = NULL;
 
- 		goto keep_queue;
 
- 	}
 
- expire:
 
- 	cfq_slice_expired(cfqd, 0);
 
- new_queue:
 
- 	/*
 
- 	 * Current queue expired. Check if we have to switch to a new
 
- 	 * service tree
 
- 	 */
 
- 	if (!new_cfqq)
 
- 		cfq_choose_cfqg(cfqd);
 
- 	cfqq = cfq_set_active_queue(cfqd, new_cfqq);
 
- keep_queue:
 
- 	return cfqq;
 
- }
 
- static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
 
- {
 
- 	int dispatched = 0;
 
- 	while (cfqq->next_rq) {
 
- 		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
 
- 		dispatched++;
 
- 	}
 
- 	BUG_ON(!list_empty(&cfqq->fifo));
 
- 	/* By default cfqq is not expired if it is empty. Do it explicitly */
 
- 	__cfq_slice_expired(cfqq->cfqd, cfqq, 0);
 
- 	return dispatched;
 
- }
 
- /*
 
-  * Drain our current requests. Used for barriers and when switching
 
-  * io schedulers on-the-fly.
 
-  */
 
- static int cfq_forced_dispatch(struct cfq_data *cfqd)
 
- {
 
- 	struct cfq_queue *cfqq;
 
- 	int dispatched = 0;
 
- 	/* Expire the timeslice of the current active queue first */
 
- 	cfq_slice_expired(cfqd, 0);
 
- 	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
 
- 		__cfq_set_active_queue(cfqd, cfqq);
 
- 		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
 
- 	}
 
- 	BUG_ON(cfqd->busy_queues);
 
- 	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
 
- 	return dispatched;
 
- }
 
- static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
 
- 	struct cfq_queue *cfqq)
 
- {
 
- 	/* the queue hasn't finished any request, can't estimate */
 
- 	if (cfq_cfqq_slice_new(cfqq))
 
- 		return true;
 
- 	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
 
- 		cfqq->slice_end))
 
- 		return true;
 
- 	return false;
 
- }
 
- static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
- {
 
- 	unsigned int max_dispatch;
 
- 	/*
 
- 	 * Drain async requests before we start sync IO
 
- 	 */
 
- 	if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
 
- 		return false;
 
- 	/*
 
- 	 * If this is an async queue and we have sync IO in flight, let it wait
 
- 	 */
 
- 	if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
 
- 		return false;
 
- 	max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
 
- 	if (cfq_class_idle(cfqq))
 
- 		max_dispatch = 1;
 
- 	/*
 
- 	 * Does this cfqq already have too much IO in flight?
 
- 	 */
 
- 	if (cfqq->dispatched >= max_dispatch) {
 
- 		bool promote_sync = false;
 
- 		/*
 
- 		 * idle queue must always only have a single IO in flight
 
- 		 */
 
- 		if (cfq_class_idle(cfqq))
 
- 			return false;
 
- 		/*
 
- 		 * If there is only one sync queue
 
- 		 * we can ignore async queue here and give the sync
 
- 		 * queue no dispatch limit. The reason is a sync queue can
 
- 		 * preempt async queue, limiting the sync queue doesn't make
 
- 		 * sense. This is useful for aiostress test.
 
- 		 */
 
- 		if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
 
- 			promote_sync = true;
 
- 		/*
 
- 		 * We have other queues, don't allow more IO from this one
 
- 		 */
 
- 		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
 
- 				!promote_sync)
 
- 			return false;
 
- 		/*
 
- 		 * Sole queue user, no limit
 
- 		 */
 
- 		if (cfqd->busy_queues == 1 || promote_sync)
 
- 			max_dispatch = -1;
 
- 		else
 
- 			/*
 
- 			 * Normally we start throttling cfqq when cfq_quantum/2
 
- 			 * requests have been dispatched. But we can drive
 
- 			 * deeper queue depths at the beginning of slice
 
- 			 * subject to the upper limit of cfq_quantum.

- 			 */
 
- 			max_dispatch = cfqd->cfq_quantum;
 
- 	}
 
- 	/*
 
- 	 * Async queues must wait a bit before being allowed dispatch.
 
- 	 * We also ramp up the dispatch depth gradually for async IO,
 
- 	 * based on the last sync IO we serviced
 
- 	 */
 
- 	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
 
- 		unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
 
- 		unsigned int depth;
 
- 		depth = last_sync / cfqd->cfq_slice[1];
 
- 		if (!depth && !cfqq->dispatched)
 
- 			depth = 1;
 
- 		if (depth < max_dispatch)
 
- 			max_dispatch = depth;
 
- 	}
 
- 	/*
 
- 	 * If we're below the current max, allow a dispatch
 
- 	 */
 
- 	return cfqq->dispatched < max_dispatch;
 
- }
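
The async ramp near the end reduces to integer division of the time since the last delayed sync completion by the sync slice; a sketch with a hypothetical 100-jiffy cfq_slice[1]:

	#include <stdio.h>

	int main(void)
	{
		unsigned long slice_sync = 100;			/* hypothetical cfqd->cfq_slice[1] */
		unsigned long since_sync[] = { 30, 150, 450 };	/* jiffies since last_delayed_sync */

		for (int i = 0; i < 3; i++)
			printf("last sync %lu ago -> async depth %lu\n",
			       since_sync[i], since_sync[i] / slice_sync);
		return 0;	/* 0 (bumped to 1 if nothing in flight), 1, 4 */
	}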
 
- /*
 
-  * Dispatch a request from cfqq, moving it to the request queue
 
-  * dispatch list.
 
-  */
 
- static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
- {
 
- 	struct request *rq;
 
- 	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
 
- 	if (!cfq_may_dispatch(cfqd, cfqq))
 
- 		return false;
 
- 	/*
 
- 	 * follow expired path, else get first next available
 
- 	 */
 
- 	rq = cfq_check_fifo(cfqq);
 
- 	if (!rq)
 
- 		rq = cfqq->next_rq;
 
- 	/*
 
- 	 * insert request into driver dispatch list
 
- 	 */
 
- 	cfq_dispatch_insert(cfqd->queue, rq);
 
- 	if (!cfqd->active_cic) {
 
- 		struct cfq_io_cq *cic = RQ_CIC(rq);
 
- 		atomic_long_inc(&cic->icq.ioc->refcount);
 
- 		cfqd->active_cic = cic;
 
- 	}
 
- 	return true;
 
- }
 
- /*
 
-  * Find the cfqq that we need to service and move a request from that to the
 
-  * dispatch list
 
-  */
 
- static int cfq_dispatch_requests(struct request_queue *q, int force)
 
- {
 
- 	struct cfq_data *cfqd = q->elevator->elevator_data;
 
- 	struct cfq_queue *cfqq;
 
- 	if (!cfqd->busy_queues)
 
- 		return 0;
 
- 	if (unlikely(force))
 
- 		return cfq_forced_dispatch(cfqd);
 
- 	cfqq = cfq_select_queue(cfqd);
 
- 	if (!cfqq)
 
- 		return 0;
 
- 	/*
 
- 	 * Dispatch a request from this cfqq, if it is allowed
 
- 	 */
 
- 	if (!cfq_dispatch_request(cfqd, cfqq))
 
- 		return 0;
 
- 	cfqq->slice_dispatch++;
 
- 	cfq_clear_cfqq_must_dispatch(cfqq);
 
- 	/*
 
- 	 * expire an async queue immediately if it has used up its slice. An

- 	 * idle queue always expires after 1 dispatch round.
 
- 	 */
 
- 	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
 
- 	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
 
- 	    cfq_class_idle(cfqq))) {
 
- 		cfqq->slice_end = jiffies + 1;
 
- 		cfq_slice_expired(cfqd, 0);
 
- 	}
 
- 	cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
 
- 	return 1;
 
- }
 
- /*
 
-  * task holds one reference to the queue, dropped when task exits. each rq
 
-  * in-flight on this queue also holds a reference, dropped when rq is freed.
 
-  *
 
-  * Each cfq queue took a reference on the parent group. Drop it now.
 
-  * queue lock must be held here.
 
-  */
 
- static void cfq_put_queue(struct cfq_queue *cfqq)
 
- {
 
- 	struct cfq_data *cfqd = cfqq->cfqd;
 
- 	struct cfq_group *cfqg;
 
- 	BUG_ON(cfqq->ref <= 0);
 
- 	cfqq->ref--;
 
- 	if (cfqq->ref)
 
- 		return;
 
- 	cfq_log_cfqq(cfqd, cfqq, "put_queue");
 
- 	BUG_ON(rb_first(&cfqq->sort_list));
 
- 	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
 
- 	cfqg = cfqq->cfqg;
 
- 	if (unlikely(cfqd->active_queue == cfqq)) {
 
- 		__cfq_slice_expired(cfqd, cfqq, 0);
 
- 		cfq_schedule_dispatch(cfqd);
 
- 	}
 
- 	BUG_ON(cfq_cfqq_on_rr(cfqq));
 
- 	kmem_cache_free(cfq_pool, cfqq);
 
- 	cfqg_put(cfqg);
 
- }
 
- static void cfq_put_cooperator(struct cfq_queue *cfqq)
 
- {
 
- 	struct cfq_queue *__cfqq, *next;
 
- 	/*
 
- 	 * If this queue was scheduled to merge with another queue, be
 
- 	 * sure to drop the reference taken on that queue (and others in
 
- 	 * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
 
- 	 */
 
- 	__cfqq = cfqq->new_cfqq;
 
- 	while (__cfqq) {
 
- 		if (__cfqq == cfqq) {
 
- 			WARN(1, "cfqq->new_cfqq loop detected\n");
 
- 			break;
 
- 		}
 
- 		next = __cfqq->new_cfqq;
 
- 		cfq_put_queue(__cfqq);
 
- 		__cfqq = next;
 
- 	}
 
- }
 
- static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
- {
 
- 	if (unlikely(cfqq == cfqd->active_queue)) {
 
- 		__cfq_slice_expired(cfqd, cfqq, 0);
 
- 		cfq_schedule_dispatch(cfqd);
 
- 	}
 
- 	cfq_put_cooperator(cfqq);
 
- 	cfq_put_queue(cfqq);
 
- }
 
- static void cfq_init_icq(struct io_cq *icq)
 
- {
 
- 	struct cfq_io_cq *cic = icq_to_cic(icq);
 
- 	cic->ttime.last_end_request = jiffies;
 
- }
 
- static void cfq_exit_icq(struct io_cq *icq)
 
- {
 
- 	struct cfq_io_cq *cic = icq_to_cic(icq);
 
- 	struct cfq_data *cfqd = cic_to_cfqd(cic);
 
- 	if (cic->cfqq[BLK_RW_ASYNC]) {
 
- 		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
 
- 		cic->cfqq[BLK_RW_ASYNC] = NULL;
 
- 	}
 
- 	if (cic->cfqq[BLK_RW_SYNC]) {
 
- 		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
 
- 		cic->cfqq[BLK_RW_SYNC] = NULL;
 
- 	}
 
- }
 
- static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
 
- {
 
- 	struct task_struct *tsk = current;
 
- 	int ioprio_class;
 
- 	if (!cfq_cfqq_prio_changed(cfqq))
 
- 		return;
 
- 	ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
 
- 	switch (ioprio_class) {
 
- 	default:
 
- 		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
 
- 	case IOPRIO_CLASS_NONE:
 
- 		/*
 
- 		 * no prio set, inherit CPU scheduling settings
 
- 		 */
 
- 		cfqq->ioprio = task_nice_ioprio(tsk);
 
- 		cfqq->ioprio_class = task_nice_ioclass(tsk);
 
- 		break;
 
- 	case IOPRIO_CLASS_RT:
 
- 		cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
 
- 		cfqq->ioprio_class = IOPRIO_CLASS_RT;
 
- 		break;
 
- 	case IOPRIO_CLASS_BE:
 
- 		cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
 
- 		cfqq->ioprio_class = IOPRIO_CLASS_BE;
 
- 		break;
 
- 	case IOPRIO_CLASS_IDLE:
 
- 		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
 
- 		cfqq->ioprio = 7;
 
- 		cfq_clear_cfqq_idle_window(cfqq);
 
- 		break;
 
- 	}
 
- 	/*
 
- 	 * keep track of original prio settings in case we have to temporarily
 
- 	 * elevate the priority of this queue
 
- 	 */
 
- 	cfqq->org_ioprio = cfqq->ioprio;
 
- 	cfq_clear_cfqq_prio_changed(cfqq);
 
- }
 
- static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
 
- {
 
- 	int ioprio = cic->icq.ioc->ioprio;
 
- 	struct cfq_data *cfqd = cic_to_cfqd(cic);
 
- 	struct cfq_queue *cfqq;
 
- 	/*
 
- 	 * Check whether ioprio has changed.  The condition may trigger
 
- 	 * spuriously on a newly created cic but there's no harm.
 
- 	 */
 
- 	if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
 
- 		return;
 
- 	cfqq = cic->cfqq[BLK_RW_ASYNC];
 
- 	if (cfqq) {
 
- 		struct cfq_queue *new_cfqq;
 
- 		new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
 
- 					 GFP_ATOMIC);
 
- 		if (new_cfqq) {
 
- 			cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
 
- 			cfq_put_queue(cfqq);
 
- 		}
 
- 	}
 
- 	cfqq = cic->cfqq[BLK_RW_SYNC];
 
- 	if (cfqq)
 
- 		cfq_mark_cfqq_prio_changed(cfqq);
 
- 	cic->ioprio = ioprio;
 
- }
 
- static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
- 			  pid_t pid, bool is_sync)
 
- {
 
- 	RB_CLEAR_NODE(&cfqq->rb_node);
 
- 	RB_CLEAR_NODE(&cfqq->p_node);
 
- 	INIT_LIST_HEAD(&cfqq->fifo);
 
- 	cfqq->ref = 0;
 
- 	cfqq->cfqd = cfqd;
 
- 	cfq_mark_cfqq_prio_changed(cfqq);
 
- 	if (is_sync) {
 
- 		if (!cfq_class_idle(cfqq))
 
- 			cfq_mark_cfqq_idle_window(cfqq);
 
- 		cfq_mark_cfqq_sync(cfqq);
 
- 	}
 
- 	cfqq->pid = pid;
 
- }
 
- #ifdef CONFIG_CFQ_GROUP_IOSCHED
 
- static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
 
- {
 
- 	struct cfq_data *cfqd = cic_to_cfqd(cic);
 
- 	struct cfq_queue *sync_cfqq;
 
- 	uint64_t id;
 
- 	rcu_read_lock();
 
- 	id = bio_blkcg(bio)->id;
 
- 	rcu_read_unlock();
 
- 	/*
 
- 	 * Check whether blkcg has changed.  The condition may trigger
 
- 	 * spuriously on a newly created cic but there's no harm.
 
- 	 */
 
- 	if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
 
- 		return;
 
- 	sync_cfqq = cic_to_cfqq(cic, 1);
 
- 	if (sync_cfqq) {
 
- 		/*
 
- 		 * Drop reference to sync queue. A new sync queue will be
 
- 		 * assigned in new group upon arrival of a fresh request.
 
- 		 */
 
- 		cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
 
- 		cic_set_cfqq(cic, NULL, 1);
 
- 		cfq_put_queue(sync_cfqq);
 
- 	}
 
- 	cic->blkcg_id = id;
 
- }
 
- #else
 
- static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
 
- #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
 
- static struct cfq_queue *
 
- cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
 
- 		     struct bio *bio, gfp_t gfp_mask)
 
- {
 
- 	struct blkcg *blkcg;
 
- 	struct cfq_queue *cfqq, *new_cfqq = NULL;
 
- 	struct cfq_group *cfqg;
 
- retry:
 
- 	rcu_read_lock();
 
- 	blkcg = bio_blkcg(bio);
 
- 	cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
 
- 	cfqq = cic_to_cfqq(cic, is_sync);
 
- 	/*
 
- 	 * Always try a new alloc if we fell back to the OOM cfqq
 
- 	 * originally, since it should just be a temporary situation.
 
- 	 */
 
- 	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
 
- 		cfqq = NULL;
 
- 		if (new_cfqq) {
 
- 			cfqq = new_cfqq;
 
- 			new_cfqq = NULL;
 
- 		} else if (gfp_mask & __GFP_WAIT) {
 
- 			rcu_read_unlock();
 
- 			spin_unlock_irq(cfqd->queue->queue_lock);
 
- 			new_cfqq = kmem_cache_alloc_node(cfq_pool,
 
- 					gfp_mask | __GFP_ZERO,
 
- 					cfqd->queue->node);
 
- 			spin_lock_irq(cfqd->queue->queue_lock);
 
- 			if (new_cfqq)
 
- 				goto retry;
 
- 		} else {
 
- 			cfqq = kmem_cache_alloc_node(cfq_pool,
 
- 					gfp_mask | __GFP_ZERO,
 
- 					cfqd->queue->node);
 
- 		}
 
- 		if (cfqq) {
 
- 			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
 
- 			cfq_init_prio_data(cfqq, cic);
 
- 			cfq_link_cfqq_cfqg(cfqq, cfqg);
 
- 			cfq_log_cfqq(cfqd, cfqq, "alloced");
 
- 		} else
 
- 			cfqq = &cfqd->oom_cfqq;
 
- 	}
 
- 	if (new_cfqq)
 
- 		kmem_cache_free(cfq_pool, new_cfqq);
 
- 	rcu_read_unlock();
 
- 	return cfqq;
 
- }
 
- static struct cfq_queue **
 
- cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
 
- {
 
- 	switch (ioprio_class) {
 
- 	case IOPRIO_CLASS_RT:
 
- 		return &cfqd->async_cfqq[0][ioprio];
 
- 	case IOPRIO_CLASS_NONE:
 
- 		ioprio = IOPRIO_NORM;
 
- 		/* fall through */
 
- 	case IOPRIO_CLASS_BE:
 
- 		return &cfqd->async_cfqq[1][ioprio];
 
- 	case IOPRIO_CLASS_IDLE:
 
- 		return &cfqd->async_idle_cfqq;
 
- 	default:
 
- 		BUG();
 
- 	}
 
- }
 
- static struct cfq_queue *
 
- cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
 
- 	      struct bio *bio, gfp_t gfp_mask)
 
- {
 
- 	const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
 
- 	const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
 
- 	struct cfq_queue **async_cfqq = NULL;
 
- 	struct cfq_queue *cfqq = NULL;
 
- 	if (!is_sync) {
 
- 		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
 
- 		cfqq = *async_cfqq;
 
- 	}
 
- 	if (!cfqq)
 
- 		cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
 
- 	/*
 
- 	 * pin the queue now that it's allocated, scheduler exit will prune it
 
- 	 */
 
- 	if (!is_sync && !(*async_cfqq)) {
 
- 		cfqq->ref++;
 
- 		*async_cfqq = cfqq;
 
- 	}
 
- 	cfqq->ref++;
 
- 	return cfqq;
 
- }
 
- static void
 
- __cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
 
- {
 
- 	unsigned long elapsed = jiffies - ttime->last_end_request;
 
- 	elapsed = min(elapsed, 2UL * slice_idle);
 
- 	ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
 
- 	ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
 
- 	ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
 
- }
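
The fixed-point averaging above is an exponentially weighted moving average: each update keeps 7/8 of the history and folds in the new sample scaled by 256, so the mean tracks recent think times without a division-free sample count ever reaching zero. A standalone sketch with hypothetical samples:

	#include <stdio.h>

	int main(void)
	{
		unsigned long samples = 0, total = 0, mean;
		unsigned long elapsed[] = { 4, 4, 12 };	/* hypothetical jiffies */

		for (int i = 0; i < 3; i++) {
			samples = (7 * samples + 256) / 8;
			total = (7 * total + 256 * elapsed[i]) / 8;
		}
		mean = (total + 128) / samples;	/* rounds to 8: weighted toward the recent 12 */
		printf("ttime_mean=%lu\n", mean);
		return 0;
	}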
 
- static void
 
- cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
- 			struct cfq_io_cq *cic)
 
- {
 
- 	if (cfq_cfqq_sync(cfqq)) {
 
- 		__cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
 
- 		__cfq_update_io_thinktime(&cfqq->service_tree->ttime,
 
- 			cfqd->cfq_slice_idle);
 
- 	}
 
- #ifdef CONFIG_CFQ_GROUP_IOSCHED
 
- 	__cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
 
- #endif
 
- }
 
- static void
 
- cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
- 		       struct request *rq)
 
- {
 
- 	sector_t sdist = 0;
 
- 	sector_t n_sec = blk_rq_sectors(rq);
 
- 	if (cfqq->last_request_pos) {
 
- 		if (cfqq->last_request_pos < blk_rq_pos(rq))
 
- 			sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
 
- 		else
 
- 			sdist = cfqq->last_request_pos - blk_rq_pos(rq);
 
- 	}
 
- 	cfqq->seek_history <<= 1;
 
- 	if (blk_queue_nonrot(cfqd->queue))
 
- 		cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
 
- 	else
 
- 		cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
 
- }
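
seek_history is thus a 32-bit sliding window, one bit per request, set when that request looked seeky (large sector distance, or a small request on non-rotational media). The CFQQ_SEEKY() test elsewhere in the file is a population count over this window; the 32/8 threshold below is an assumption for illustration:

	#include <stdio.h>

	int main(void)
	{
		unsigned int seek_history = 0;
		int seeky_requests[] = { 1, 0, 1, 1, 0, 1, 1 };	/* hypothetical pattern */

		for (int i = 0; i < 7; i++)
			seek_history = (seek_history << 1) | seeky_requests[i];
		/* hweight32() in the kernel; builtin popcount here */
		printf("seeky=%d\n", __builtin_popcount(seek_history) > 32 / 8);
		return 0;	/* 5 set bits > 4 -> seeky=1 */
	}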
 
- /*
 
-  * Disable idle window if the process thinks too long or seeks so much that
 
-  * it doesn't matter
 
-  */
 
- static void
 
- cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
- 		       struct cfq_io_cq *cic)
 
- {
 
- 	int old_idle, enable_idle;
 
- 	/*
 
- 	 * Don't idle for async or idle io prio class
 
- 	 */
 
- 	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
 
- 		return;
 
- 	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
 
- 	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
 
- 		cfq_mark_cfqq_deep(cfqq);
 
- 	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
 
- 		enable_idle = 0;
 
- 	else if (!atomic_read(&cic->icq.ioc->active_ref) ||
 
- 		 !cfqd->cfq_slice_idle ||
 
- 		 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
 
- 		enable_idle = 0;
 
- 	else if (sample_valid(cic->ttime.ttime_samples)) {
 
- 		if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
 
- 			enable_idle = 0;
 
- 		else
 
- 			enable_idle = 1;
 
- 	}
 
- 	if (old_idle != enable_idle) {
 
- 		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
 
- 		if (enable_idle)
 
- 			cfq_mark_cfqq_idle_window(cfqq);
 
- 		else
 
- 			cfq_clear_cfqq_idle_window(cfqq);
 
- 	}
 
- }
 
- /*
 
-  * Check if new_cfqq should preempt the currently active queue. Return false

-  * if not (or if we aren't sure); returning true causes a preempt.
 
-  */
 
- static bool
 
- cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 
- 		   struct request *rq)
 
- {
 
- 	struct cfq_queue *cfqq;
 
- 	cfqq = cfqd->active_queue;
 
- 	if (!cfqq)
 
- 		return false;
 
- 	if (cfq_class_idle(new_cfqq))
 
- 		return false;
 
- 	if (cfq_class_idle(cfqq))
 
- 		return true;
 
- 	/*
 
- 	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
 
- 	 */
 
- 	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
 
- 		return false;
 
- 	/*
 
- 	 * if the new request is sync, but the currently running queue is
 
- 	 * not, let the sync request have priority.
 
- 	 */
 
- 	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
 
- 		return true;
 
- 	if (new_cfqq->cfqg != cfqq->cfqg)
 
- 		return false;
 
- 	if (cfq_slice_used(cfqq))
 
- 		return true;
 
- 	/* Allow preemption only if we are idling on sync-noidle tree */
 
- 	if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
 
- 	    cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
 
- 	    new_cfqq->service_tree->count == 2 &&
 
- 	    RB_EMPTY_ROOT(&cfqq->sort_list))
 
- 		return true;
 
- 	/*
 
- 	 * So both queues are sync. Let the new request get disk time if
 
- 	 * it's a metadata request and the current queue is doing regular IO.
 
- 	 */
 
- 	if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
 
- 		return true;
 
- 	/*
 
- 	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
 
- 	 */
 
- 	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
 
- 		return true;
 
- 	/* An idle queue should not be idle now for some reason */
 
- 	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
 
- 		return true;
 
- 	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
 
- 		return false;
 
- 	/*
 
- 	 * if this request is as-good as one we would expect from the
 
- 	 * current cfqq, let it preempt
 
- 	 */
 
- 	if (cfq_rq_close(cfqd, cfqq, rq))
 
- 		return true;
 
- 	return false;
 
- }
 
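Read top to bottom, these checks form an ordered rule list: scheduling class decides first (the idle class never preempts, anything preempts the idle class, RT slices are protected), sync beats async within a class, nothing crosses a cgroup boundary, and only then do the softer heuristics (expired slice, metadata priority, request proximity) get a say. A condensed sketch of the leading rules, using boolean stand-ins for the kernel predicates:

	#include <stdbool.h>
	#include <stdio.h>

	/* The first few preemption rules, in order; inputs are stand-ins. */
	static bool should_preempt(bool new_idle_class, bool cur_idle_class,
				   bool cur_rt, bool new_rt,
				   bool new_sync, bool cur_sync,
				   bool same_group, bool cur_slice_used)
	{
		if (new_idle_class)
			return false;	/* idle class never preempts */
		if (cur_idle_class)
			return true;	/* anything preempts idle class */
		if (cur_rt && !new_rt)
			return false;	/* don't steal an RT slice */
		if (new_sync && !cur_sync)
			return true;	/* sync beats async */
		if (!same_group)
			return false;	/* never preempt across groups */
		return cur_slice_used;	/* an expired slice is fair game */
	}

	int main(void)
	{
		/* sync request arriving while an async queue runs: preempt */
		printf("%d\n", should_preempt(false, false, false, false,
					      true, false, true, false));
		return 0;
	}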
-
- /*
-  * cfqq preempts the active queue. if we allowed preempt with no slice left,
-  * let it have half of its nominal slice.
-  */
- static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
- {
- 	enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
-
- 	cfq_log_cfqq(cfqd, cfqq, "preempt");
- 	cfq_slice_expired(cfqd, 1);
-
- 	/*
- 	 * workload type is changed, don't save slice, otherwise preempt
- 	 * doesn't happen
- 	 */
- 	if (old_type != cfqq_type(cfqq))
- 		cfqq->cfqg->saved_workload_slice = 0;
-
- 	/*
- 	 * Put the new queue at the front of the current list,
- 	 * so we know that it will be selected next.
- 	 */
- 	BUG_ON(!cfq_cfqq_on_rr(cfqq));
-
- 	cfq_service_tree_add(cfqd, cfqq, 1);
-
- 	cfqq->slice_end = 0;
- 	cfq_mark_cfqq_slice_new(cfqq);
- }
-
- /*
-  * Called when a new fs request (rq) is added (to cfqq). Check if there's
-  * something we should do about it
-  */
- static void
- cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- 		struct request *rq)
- {
- 	struct cfq_io_cq *cic = RQ_CIC(rq);
-
- 	cfqd->rq_queued++;
- 	if (rq->cmd_flags & REQ_PRIO)
- 		cfqq->prio_pending++;
-
- 	cfq_update_io_thinktime(cfqd, cfqq, cic);
- 	cfq_update_io_seektime(cfqd, cfqq, rq);
- 	cfq_update_idle_window(cfqd, cfqq, cic);
-
- 	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
-
- 	if (cfqq == cfqd->active_queue) {
- 		/*
- 		 * Remember that we saw a request from this process, but
- 		 * don't start queuing just yet. Otherwise we risk seeing lots
- 		 * of tiny requests, because we disrupt the normal plugging
- 		 * and merging. If the request is already larger than a single
- 		 * page, let it rip immediately. For that case we assume that
- 		 * merging is already done. Ditto for a busy system that
- 		 * has other work pending, don't risk delaying until the
- 		 * idle timer unplug to continue working.
- 		 */
- 		if (cfq_cfqq_wait_request(cfqq)) {
- 			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
- 			    cfqd->busy_queues > 1) {
- 				cfq_del_timer(cfqd, cfqq);
- 				cfq_clear_cfqq_wait_request(cfqq);
- 				__blk_run_queue(cfqd->queue);
- 			} else {
- 				cfqg_stats_update_idle_time(cfqq->cfqg);
- 				cfq_mark_cfqq_must_dispatch(cfqq);
- 			}
- 		}
- 	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
- 		/*
- 		 * not the active queue - expire the current slice if it is
- 		 * idle and has expired its mean thinktime, or if this new
- 		 * queue has some old slice time left and is of higher
- 		 * priority, or if this new queue is RT and the current one
- 		 * is BE
- 		 */
- 		cfq_preempt_queue(cfqd, cfqq);
- 		__blk_run_queue(cfqd->queue);
- 	}
- }
-
- static void cfq_insert_request(struct request_queue *q, struct request *rq)
- {
- 	struct cfq_data *cfqd = q->elevator->elevator_data;
- 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
-
- 	cfq_log_cfqq(cfqd, cfqq, "insert_request");
- 	cfq_init_prio_data(cfqq, RQ_CIC(rq));
-
- 	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
- 	list_add_tail(&rq->queuelist, &cfqq->fifo);
- 	cfq_add_rq_rb(rq);
- 	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
- 				 rq->cmd_flags);
- 	cfq_rq_enqueued(cfqd, cfqq, rq);
- }
-
- /*
-  * Update hw_tag based on peak queue depth over 50 samples under
-  * sufficient load.
-  */
- static void cfq_update_hw_tag(struct cfq_data *cfqd)
- {
- 	struct cfq_queue *cfqq = cfqd->active_queue;
-
- 	if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
- 		cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
-
- 	if (cfqd->hw_tag == 1)
- 		return;
-
- 	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
- 	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
- 		return;
-
- 	/*
- 	 * If the active queue doesn't have enough requests and can idle, cfq
- 	 * might not dispatch sufficient requests to hardware. Don't zero
- 	 * hw_tag in this case.
- 	 */
- 	if (cfqq && cfq_cfqq_idle_window(cfqq) &&
- 	    cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
- 	    CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
- 		return;
-
- 	if (cfqd->hw_tag_samples++ < 50)
- 		return;
-
- 	if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
- 		cfqd->hw_tag = 1;
- 	else
- 		cfqd->hw_tag = 0;
- }
 
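hw_tag is inferred rather than queried: across 50 samples taken under sufficient load, the scheduler records the peak number of requests the driver kept in flight, and only concludes that the device does its own command queueing if that peak ever reached the minimum depth. A user-space sketch of the estimator follows; the threshold of 5 is what CFQ_HW_QUEUE_MIN appears to be in this era of the code, but treat it as an assumption, and note that the sketch omits the light-load filtering the kernel version does.

	#include <stdio.h>

	#define HW_QUEUE_MIN 5	/* assumed value of CFQ_HW_QUEUE_MIN */

	struct hw_tag_est {
		int samples;
		int est_depth;	/* peak in-flight depth observed */
		int decided;
		int hw_tag;
	};

	/* One sample per request completion; load filtering omitted. */
	static void hw_tag_sample(struct hw_tag_est *e, int in_flight)
	{
		if (e->decided)
			return;
		if (in_flight > e->est_depth)
			e->est_depth = in_flight;
		if (++e->samples < 50)
			return;		/* not enough evidence yet */
		e->hw_tag = (e->est_depth >= HW_QUEUE_MIN);
		e->decided = 1;
	}

	int main(void)
	{
		struct hw_tag_est e = { 0, 0, 0, 0 };
		int i;

		for (i = 0; i < 50; i++)
			hw_tag_sample(&e, i % 8);	/* depth peaks at 7 */
		printf("hw_tag = %d\n", e.hw_tag);	/* 1: device queues internally */
		return 0;
	}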
-
- static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
- {
- 	struct cfq_io_cq *cic = cfqd->active_cic;
-
- 	/* If the queue already has requests, don't wait */
- 	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
- 		return false;
-
- 	/* If there are other queues in the group, don't wait */
- 	if (cfqq->cfqg->nr_cfqq > 1)
- 		return false;
-
- 	/* the only queue in the group, but think time is big */
- 	if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
- 		return false;
-
- 	if (cfq_slice_used(cfqq))
- 		return true;
-
- 	/* if slice left is less than think time, wait busy */
- 	if (cic && sample_valid(cic->ttime.ttime_samples)
- 	    && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
- 		return true;
-
- 	/*
- 	 * If the think time is less than a jiffy, then ttime_mean = 0 and the
- 	 * check above will not be true. It might happen that the slice has not
- 	 * expired yet but will expire soon (4-5 ns) during select_queue(). To
- 	 * cover the case where the think time is less than a jiffy, mark the
- 	 * queue wait busy if only 1 jiffy is left in the slice.
- 	 */
- 	if (cfqq->slice_end - jiffies == 1)
- 		return true;
-
- 	return false;
- }
 
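The final check exists because all of this runs in jiffy units: a think time shorter than one jiffy rounds ttime_mean down to zero, so the mean-based comparison above can never fire even when the slice is about to lapse. A condensed sketch of the whole predicate, with simplified stand-in inputs:

	#include <stdbool.h>
	#include <stdio.h>

	/* Condensed wait-busy decision; inputs are simplified stand-ins. */
	static bool wait_busy(bool queue_has_requests, int group_nr_queues,
			      bool group_thinktime_big, bool slice_used,
			      long slice_left_jiffies, bool samples_valid,
			      unsigned long ttime_mean)
	{
		if (queue_has_requests || group_nr_queues > 1 || group_thinktime_big)
			return false;
		if (slice_used)
			return true;
		if (samples_valid && slice_left_jiffies < (long)ttime_mean)
			return true;	/* the next request would miss the slice */
		return slice_left_jiffies == 1;	/* sub-jiffy think times round to 0 */
	}

	int main(void)
	{
		/* lone queue, 1 jiffy left, mean rounded to 0: still wait busy */
		printf("%d\n", wait_busy(false, 1, false, false, 1, true, 0));
		return 0;
	}

Without the last rule, a group whose only queue thinks for less than a jiffy would routinely lose its slice just before the next request arrived.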
-
- static void cfq_completed_request(struct request_queue *q, struct request *rq)
- {
- 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
- 	struct cfq_data *cfqd = cfqq->cfqd;
- 	const int sync = rq_is_sync(rq);
- 	unsigned long now;
-
- 	now = jiffies;
- 	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
- 		     !!(rq->cmd_flags & REQ_NOIDLE));
-
- 	cfq_update_hw_tag(cfqd);
-
- 	WARN_ON(!cfqd->rq_in_driver);
- 	WARN_ON(!cfqq->dispatched);
- 	cfqd->rq_in_driver--;
- 	cfqq->dispatched--;
- 	(RQ_CFQG(rq))->dispatched--;
- 	cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
- 				     rq_io_start_time_ns(rq), rq->cmd_flags);
-
- 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
-
- 	if (sync) {
- 		struct cfq_rb_root *service_tree;
-
- 		RQ_CIC(rq)->ttime.last_end_request = now;
-
- 		if (cfq_cfqq_on_rr(cfqq))
- 			service_tree = cfqq->service_tree;
- 		else
- 			service_tree = service_tree_for(cfqq->cfqg,
- 				cfqq_prio(cfqq), cfqq_type(cfqq));
- 		service_tree->ttime.last_end_request = now;
- 		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
- 			cfqd->last_delayed_sync = now;
- 	}
-
- #ifdef CONFIG_CFQ_GROUP_IOSCHED
- 	cfqq->cfqg->ttime.last_end_request = now;
- #endif
-
- 	/*
- 	 * If this is the active queue, check if it needs to be expired,
- 	 * or if we want to idle in case it has no pending requests.
- 	 */
- 	if (cfqd->active_queue == cfqq) {
- 		const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
-
- 		if (cfq_cfqq_slice_new(cfqq)) {
- 			cfq_set_prio_slice(cfqd, cfqq);
- 			cfq_clear_cfqq_slice_new(cfqq);
- 		}
-
- 		/*
- 		 * Should we wait for the next request to come in before we
- 		 * expire the queue?
- 		 */
- 		if (cfq_should_wait_busy(cfqd, cfqq)) {
- 			unsigned long extend_sl = cfqd->cfq_slice_idle;
- 			if (!cfqd->cfq_slice_idle)
- 				extend_sl = cfqd->cfq_group_idle;
- 			cfqq->slice_end = jiffies + extend_sl;
- 			cfq_mark_cfqq_wait_busy(cfqq);
- 			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
- 		}
-
- 		/*
- 		 * Idling is not enabled on:
- 		 * - expired queues
- 		 * - idle-priority queues
- 		 * - async queues
- 		 * - queues with still some requests queued
- 		 * - when there is a close cooperator
- 		 */
- 		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
- 			cfq_slice_expired(cfqd, 1);
- 		else if (sync && cfqq_empty &&
- 			 !cfq_close_cooperator(cfqd, cfqq)) {
- 			cfq_arm_slice_timer(cfqd);
- 		}
- 	}
-
- 	if (!cfqd->rq_in_driver)
- 		cfq_schedule_dispatch(cfqd);
- }
-
- static inline int __cfq_may_queue(struct cfq_queue *cfqq)
- {
- 	if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
- 		cfq_mark_cfqq_must_alloc_slice(cfqq);
- 		return ELV_MQUEUE_MUST;
- 	}
-
- 	return ELV_MQUEUE_MAY;
- }
-
- static int cfq_may_queue(struct request_queue *q, int rw)
- {
- 	struct cfq_data *cfqd = q->elevator->elevator_data;
- 	struct task_struct *tsk = current;
- 	struct cfq_io_cq *cic;
- 	struct cfq_queue *cfqq;
-
- 	/*
- 	 * don't force setup of a queue from here, as a call to may_queue
- 	 * does not necessarily imply that a request actually will be queued.
- 	 * so just lookup a possibly existing queue, or return 'may queue'
- 	 * if that fails
- 	 */
- 	cic = cfq_cic_lookup(cfqd, tsk->io_context);
- 	if (!cic)
- 		return ELV_MQUEUE_MAY;
-
- 	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
- 	if (cfqq) {
- 		cfq_init_prio_data(cfqq, cic);
-
- 		return __cfq_may_queue(cfqq);
- 	}
-
- 	return ELV_MQUEUE_MAY;
- }
-
- /*
-  * queue lock held here
-  */
- static void cfq_put_request(struct request *rq)
- {
- 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
-
- 	if (cfqq) {
- 		const int rw = rq_data_dir(rq);
-
- 		BUG_ON(!cfqq->allocated[rw]);
- 		cfqq->allocated[rw]--;
-
- 		/* Put down rq reference on cfqg */
- 		cfqg_put(RQ_CFQG(rq));
- 		rq->elv.priv[0] = NULL;
- 		rq->elv.priv[1] = NULL;
-
- 		cfq_put_queue(cfqq);
- 	}
- }
-
- static struct cfq_queue *
- cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
- 		struct cfq_queue *cfqq)
- {
- 	cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
- 	cic_set_cfqq(cic, cfqq->new_cfqq, 1);
- 	cfq_mark_cfqq_coop(cfqq->new_cfqq);
- 	cfq_put_queue(cfqq);
- 	return cic_to_cfqq(cic, 1);
- }
-
- /*
-  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
-  * was the last process referring to said cfqq.
-  */
- static struct cfq_queue *
- split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
- {
- 	if (cfqq_process_refs(cfqq) == 1) {
- 		cfqq->pid = current->pid;
- 		cfq_clear_cfqq_coop(cfqq);
- 		cfq_clear_cfqq_split_coop(cfqq);
- 		return cfqq;
- 	}
-
- 	cic_set_cfqq(cic, NULL, 1);
-
- 	cfq_put_cooperator(cfqq);
-
- 	cfq_put_queue(cfqq);
- 	return NULL;
- }
-
- /*
-  * Allocate cfq data structures associated with this request.
-  */
- static int
- cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
- 		gfp_t gfp_mask)
- {
- 	struct cfq_data *cfqd = q->elevator->elevator_data;
- 	struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
- 	const int rw = rq_data_dir(rq);
- 	const bool is_sync = rq_is_sync(rq);
- 	struct cfq_queue *cfqq;
-
- 	might_sleep_if(gfp_mask & __GFP_WAIT);
-
- 	spin_lock_irq(q->queue_lock);
-
- 	check_ioprio_changed(cic, bio);
- 	check_blkcg_changed(cic, bio);
- new_queue:
- 	cfqq = cic_to_cfqq(cic, is_sync);
- 	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
- 		cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
- 		cic_set_cfqq(cic, cfqq, is_sync);
- 	} else {
- 		/*
- 		 * If the queue was seeky for too long, break it apart.
- 		 */
- 		if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
- 			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
- 			cfqq = split_cfqq(cic, cfqq);
- 			if (!cfqq)
- 				goto new_queue;
- 		}
-
- 		/*
- 		 * Check to see if this queue is scheduled to merge with
- 		 * another, closely cooperating queue.  The merging of
- 		 * queues happens here as it must be done in process context.
- 		 * The reference on new_cfqq was taken in merge_cfqqs.
- 		 */
- 		if (cfqq->new_cfqq)
- 			cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
- 	}
-
- 	cfqq->allocated[rw]++;
-
- 	cfqq->ref++;
- 	cfqg_get(cfqq->cfqg);
- 	rq->elv.priv[0] = cfqq;
- 	rq->elv.priv[1] = cfqq->cfqg;
- 	spin_unlock_irq(q->queue_lock);
- 	return 0;
- }
-
- static void cfq_kick_queue(struct work_struct *work)
- {
- 	struct cfq_data *cfqd =
- 		container_of(work, struct cfq_data, unplug_work);
- 	struct request_queue *q = cfqd->queue;
-
- 	spin_lock_irq(q->queue_lock);
- 	__blk_run_queue(cfqd->queue);
- 	spin_unlock_irq(q->queue_lock);
- }
-
- /*
-  * Timer running if the active_queue is currently idling inside its time slice
-  */
- static void cfq_idle_slice_timer(unsigned long data)
- {
- 	struct cfq_data *cfqd = (struct cfq_data *) data;
- 	struct cfq_queue *cfqq;
- 	unsigned long flags;
- 	int timed_out = 1;
-
- 	cfq_log(cfqd, "idle timer fired");
-
- 	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
-
- 	cfqq = cfqd->active_queue;
- 	if (cfqq) {
- 		timed_out = 0;
-
- 		/*
- 		 * We saw a request before the queue expired, let it through
- 		 */
- 		if (cfq_cfqq_must_dispatch(cfqq))
- 			goto out_kick;
-
- 		/*
- 		 * expired
- 		 */
- 		if (cfq_slice_used(cfqq))
- 			goto expire;
-
- 		/*
- 		 * only expire and reinvoke request handler, if there are
- 		 * other queues with pending requests
- 		 */
- 		if (!cfqd->busy_queues)
- 			goto out_cont;
-
- 		/*
- 		 * not expired and it has a request pending, let it dispatch
- 		 */
- 		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
- 			goto out_kick;
-
- 		/*
- 		 * Queue depth flag is reset only when the idle didn't succeed
- 		 */
- 		cfq_clear_cfqq_deep(cfqq);
- 	}
- expire:
- 	cfq_slice_expired(cfqd, timed_out);
- out_kick:
- 	cfq_schedule_dispatch(cfqd);
- out_cont:
- 	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
- }
-
- static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
- {
- 	del_timer_sync(&cfqd->idle_slice_timer);
- 	cancel_work_sync(&cfqd->unplug_work);
- }
-
- static void cfq_put_async_queues(struct cfq_data *cfqd)
- {
- 	int i;
-
- 	for (i = 0; i < IOPRIO_BE_NR; i++) {
- 		if (cfqd->async_cfqq[0][i])
- 			cfq_put_queue(cfqd->async_cfqq[0][i]);
- 		if (cfqd->async_cfqq[1][i])
- 			cfq_put_queue(cfqd->async_cfqq[1][i]);
- 	}
-
- 	if (cfqd->async_idle_cfqq)
- 		cfq_put_queue(cfqd->async_idle_cfqq);
- }
-
- static void cfq_exit_queue(struct elevator_queue *e)
- {
- 	struct cfq_data *cfqd = e->elevator_data;
- 	struct request_queue *q = cfqd->queue;
-
- 	cfq_shutdown_timer_wq(cfqd);
-
- 	spin_lock_irq(q->queue_lock);
-
- 	if (cfqd->active_queue)
- 		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
-
- 	cfq_put_async_queues(cfqd);
-
- 	spin_unlock_irq(q->queue_lock);
-
- 	cfq_shutdown_timer_wq(cfqd);
-
- #ifdef CONFIG_CFQ_GROUP_IOSCHED
- 	blkcg_deactivate_policy(q, &blkcg_policy_cfq);
- #else
- 	kfree(cfqd->root_group);
- #endif
- 	kfree(cfqd);
- }
-
- static int cfq_init_queue(struct request_queue *q)
- {
- 	struct cfq_data *cfqd;
- 	struct blkcg_gq *blkg __maybe_unused;
- 	int i, ret;
-
- 	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
- 	if (!cfqd)
- 		return -ENOMEM;
-
- 	cfqd->queue = q;
- 	q->elevator->elevator_data = cfqd;
-
- 	/* Init root service tree */
- 	cfqd->grp_service_tree = CFQ_RB_ROOT;
-
- 	/* Init root group and prefer root group over other groups by default */
- #ifdef CONFIG_CFQ_GROUP_IOSCHED
- 	ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
- 	if (ret)
- 		goto out_free;
-
- 	cfqd->root_group = blkg_to_cfqg(q->root_blkg);
- #else
- 	ret = -ENOMEM;
- 	cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
- 					GFP_KERNEL, cfqd->queue->node);
- 	if (!cfqd->root_group)
- 		goto out_free;
-
- 	cfq_init_cfqg_base(cfqd->root_group);
- #endif
- 	cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
-
- 	/*
- 	 * Not strictly needed (since RB_ROOT just clears the node and we
- 	 * zeroed cfqd on alloc), but better be safe in case someone decides
- 	 * to add magic to the rb code
- 	 */
- 	for (i = 0; i < CFQ_PRIO_LISTS; i++)
- 		cfqd->prio_trees[i] = RB_ROOT;
-
- 	/*
- 	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
- 	 * Grab a permanent reference to it, so that the normal code flow
- 	 * will not attempt to free it.  oom_cfqq is linked to root_group
- 	 * but shouldn't hold a reference as it'll never be unlinked.  Lose
- 	 * the reference from linking right away.