rtl8821ce_xmit.c

/******************************************************************************
 *
 * Copyright(c) 2016 - 2017 Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 *****************************************************************************/
#define _RTL8821CE_XMIT_C_

#include <drv_types.h>		/* PADAPTER, rtw_xmit.h and etc. */
#include <hal_data.h>		/* HAL_DATA_TYPE */
#include "../halmac/halmac_api.h"
#include "../rtl8821c.h"
#include "rtl8821ce.h"

/* Debug Buffer Descriptor Ring */
/*#define BUF_DESC_DEBUG*/
#ifdef BUF_DESC_DEBUG
#define buf_desc_debug(...) RTW_INFO("BUF_DESC:" __VA_ARGS__)
#else
#define buf_desc_debug(...) do {} while (0)
#endif

static void rtl8821ce_xmit_tasklet(void *priv)
{
	_irqL irqL;
	_adapter *padapter = (_adapter *)priv;
	HAL_DATA_TYPE *pHalData = GET_HAL_DATA(padapter);
	struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);

	/* try to deal with the pending packets */
	rtl8821ce_xmitframe_resume(padapter);
}

s32 rtl8821ce_init_xmit_priv(_adapter *padapter)
{
	s32 ret = _SUCCESS;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);

	_rtw_spinlock_init(&pdvobjpriv->irq_th_lock);

#ifdef PLATFORM_LINUX
	tasklet_init(&pxmitpriv->xmit_tasklet,
		     (void(*)(unsigned long))rtl8821ce_xmit_tasklet,
		     (unsigned long)padapter);
#endif

	rtl8821c_init_xmit_priv(padapter);

	return ret;
}

void rtl8821ce_free_xmit_priv(_adapter *padapter)
{
	struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);

	_rtw_spinlock_free(&pdvobjpriv->irq_th_lock);
}

static s32 rtl8821ce_enqueue_xmitbuf(struct rtw_tx_ring *ring,
				     struct xmit_buf *pxmitbuf)
{
	_irqL irqL;
	_queue *ppending_queue = &ring->queue;

	if (pxmitbuf == NULL)
		return _FAIL;

	rtw_list_delete(&pxmitbuf->list);
	rtw_list_insert_tail(&(pxmitbuf->list), get_list_head(ppending_queue));
	ring->qlen++;

	return _SUCCESS;
}

struct xmit_buf *rtl8821ce_dequeue_xmitbuf(struct rtw_tx_ring *ring)
{
	_irqL irqL;
	_list *plist, *phead;
	struct xmit_buf *pxmitbuf = NULL;
	_queue *ppending_queue = &ring->queue;

	if (_rtw_queue_empty(ppending_queue) == _TRUE)
		pxmitbuf = NULL;
	else {
		phead = get_list_head(ppending_queue);
		plist = get_next(phead);
		pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
		rtw_list_delete(&(pxmitbuf->list));
		ring->qlen--;
	}

	return pxmitbuf;
}
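
/*
 * Ring accounting used by get_txbd() and fill_txbd_own() below (summary of
 * the logic in this file, with illustrative numbers):
 *   - ring->idx  : software read pointer, advanced in rtl8821ce_tx_isr()
 *                  once the hardware has consumed a descriptor.
 *   - ring->qlen : number of submitted descriptors not yet reclaimed.
 *   - next free slot = (ring->idx + ring->qlen) % ring->entries.
 * One slot is always left unused so that "read index == write index" can
 * only mean "ring empty", never "ring full". E.g. (hypothetical values)
 * with entries = 128, idx = 10 and qlen = 3, the next packet is placed at
 * slot 13 and at most 127 descriptors can ever be outstanding.
 */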

static u8 *get_txbd(_adapter *padapter, u8 q_idx)
{
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	struct rtw_tx_ring *ring;
	u8 *ptxbd = NULL;
	int idx = 0;

	ring = &pxmitpriv->tx_ring[q_idx];

	/* DO NOT use the last entry. */
	/* (entries - 1) avoids the wrap-around overlap problem in a circular queue. */
	if (ring->qlen == (ring->entries - 1)) {
		RTW_INFO("No more TX desc@%d, ring->idx = %d,idx = %d\n",
			 q_idx, ring->idx, idx);
		return NULL;
	}

	if (q_idx == BCN_QUEUE_INX)
		idx = 0;
	else
		idx = (ring->idx + ring->qlen) % ring->entries;

	ptxbd = (u8 *)&ring->buf_desc[idx];

	return ptxbd;
}

/*
 * Get the txbd register address according to q_idx
 */
u16 get_txbd_rw_reg(u16 q_idx)
{
	u16 txbd_reg_addr = REG_BEQ_TXBD_IDX;

	switch (q_idx) {
	case BK_QUEUE_INX:
		txbd_reg_addr = REG_BKQ_TXBD_IDX;
		break;
	case BE_QUEUE_INX:
		txbd_reg_addr = REG_BEQ_TXBD_IDX;
		break;
	case VI_QUEUE_INX:
		txbd_reg_addr = REG_VIQ_TXBD_IDX;
		break;
	case VO_QUEUE_INX:
		txbd_reg_addr = REG_VOQ_TXBD_IDX;
		break;
	case BCN_QUEUE_INX:
		txbd_reg_addr = REG_BEQ_TXBD_IDX;	/* need check */
		break;
	case TXCMD_QUEUE_INX:
		txbd_reg_addr = REG_H2CQ_TXBD_IDX;
		break;
	case MGT_QUEUE_INX:
		txbd_reg_addr = REG_MGQ_TXBD_IDX;
		break;
	case HIGH_QUEUE_INX:
		txbd_reg_addr = REG_HI0Q_TXBD_IDX;	/* need check */
		break;
	default:
		break;
	}

	return txbd_reg_addr;
}

struct xmit_frame *__rtw_alloc_cmdxmitframe_8821ce(struct xmit_priv *pxmitpriv,
						   enum cmdbuf_type buf_type)
{
	_adapter *padapter;
	u16 queue_idx = BCN_QUEUE_INX;
	u8 *ptxdesc = NULL;

	padapter = GET_PRIMARY_ADAPTER(pxmitpriv->adapter);
	ptxdesc = get_txbd(padapter, BCN_QUEUE_INX);

	/* clear OWN bit in the beacon tx buffer descriptor */
#if 1 /* vincent TODO */
	if (ptxdesc != NULL)
		SET_TX_BD_OWN(ptxdesc, 0);
	else
		return NULL;
#endif

	return __rtw_alloc_cmdxmitframe(pxmitpriv, CMDBUF_BEACON);
}

/*
 * Update the read/write pointer.
 * The read pointer is the h/w descriptor index.
 * The write pointer is the host descriptor index:
 * on the tx side, when the own bit is set for packet index n,
 * the host pointer (write pointer) points to index n + 1.
 */
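
/*
 * Illustrative example (hypothetical values): with entries = 128, idx = 10
 * and qlen = 3 (the current packet has already been counted by the enqueue),
 * host_wp = (10 + 3) % 128 = 13 is written to the queue's TXBD index
 * register, telling the DMA engine that descriptors 10..12 are ready.
 */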

void fill_txbd_own(_adapter *padapter, u8 *txbd, u16 queue_idx,
		   struct rtw_tx_ring *ptxring)
{
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	struct rtw_tx_ring *ring;
	u16 host_wp = 0;

	if (queue_idx == BCN_QUEUE_INX) {
		SET_TX_BD_OWN(txbd, 1);
		/* kick start */
		rtw_write8(padapter, REG_RX_RXBD_NUM + 1,
			   rtw_read8(padapter, REG_RX_RXBD_NUM + 1) | BIT(4));
		return;
	}

	/*
	 * Update the h/w index:
	 * on the tx side, if the own bit is set for packet index n,
	 * the host pointer (write pointer) points to index n + 1.
	 */

	/* For the current tx packet, the enqueue has already done ring->qlen++,
	 * so host_wp = ring->idx + ring->qlen.
	 */
	host_wp = (ptxring->idx + ptxring->qlen) % ptxring->entries;
	rtw_write16(padapter, get_txbd_rw_reg(queue_idx), host_wp);
}

static u16 ffaddr2dma(u32 addr)
{
	u16 dma_ctrl;

	switch (addr) {
	case VO_QUEUE_INX:
		dma_ctrl = BIT3;
		break;
	case VI_QUEUE_INX:
		dma_ctrl = BIT2;
		break;
	case BE_QUEUE_INX:
		dma_ctrl = BIT1;
		break;
	case BK_QUEUE_INX:
		dma_ctrl = BIT0;
		break;
	case BCN_QUEUE_INX:
		dma_ctrl = BIT4;
		break;
	case MGT_QUEUE_INX:
		dma_ctrl = BIT6;
		break;
	case HIGH_QUEUE_INX:
		dma_ctrl = BIT7;
		break;
	default:
		dma_ctrl = 0;
		break;
	}

	return dma_ctrl;
}

/*
 * Fill the tx buffer descriptor: map each buffer address into a tx buffer
 * descriptor segment. Designed for the tx buffer descriptor architecture.
 * Input *txbd: pointer to the Tx Buffer Descriptor
 */
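
/*
 * Layout produced below (as coded in this function): segment 0 of the buffer
 * descriptor points at the TX_WIFI_INFO (tx descriptor) area at the start of
 * pxmitframe->buf_addr and is limited to TX_WIFI_INFO_SIZE bytes; segment 1
 * points at the coalesced 802.11 packet that immediately follows it. The
 * remaining segments are cleared because the packet is coalesced into a
 * single buffer.
 */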

static void rtl8821ce_update_txbd(struct xmit_frame *pxmitframe,
				  u8 *txbd, s32 sz)
{
	_adapter *padapter = pxmitframe->padapter;
	struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
	dma_addr_t mapping;
	u32 i = 0;
	u16 seg_num =
		((TX_BUFFER_SEG_NUM == 0) ? 2 : ((TX_BUFFER_SEG_NUM == 1) ? 4 : 8));
	u16 tx_page_size_reg = 1;
	u16 page_size_length = 0;

	/* map TX DESC buf_addr (including TX DESC + tx data) */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 18, 0))
	mapping = pci_map_single(pdvobjpriv->ppcidev, pxmitframe->buf_addr,
				 sz + TX_WIFI_INFO_SIZE, PCI_DMA_TODEVICE);
#else
	mapping = dma_map_single(&(pdvobjpriv->ppcidev)->dev, pxmitframe->buf_addr,
				 sz + TX_WIFI_INFO_SIZE, DMA_TO_DEVICE);
#endif

	/* Calculate page size.
	 * Total buffer length including TX_WIFI_INFO and PacketLen
	 */
	if (tx_page_size_reg > 0) {
		page_size_length = (sz + TX_WIFI_INFO_SIZE) /
				   (tx_page_size_reg * 128);
		if (((sz + TX_WIFI_INFO_SIZE) % (tx_page_size_reg * 128)) > 0)
			page_size_length++;
	}
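
	/*
	 * Worked example (hypothetical sizes): with tx_page_size_reg = 1 the
	 * page unit is 128 bytes, so a frame with sz + TX_WIFI_INFO_SIZE = 300
	 * bytes gives page_size_length = 300 / 128 = 2 plus one for the
	 * remainder, i.e. 3 pages reported in the PSB field.
	 */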

	/*
	 * Reset all tx buffer descriptor content
	 * -- Reset the first element
	 */
	SET_TX_BD_TX_BUFF_SIZE0(txbd, 0);
	SET_TX_BD_PSB(txbd, 0);
	SET_TX_BD_OWN(txbd, 0);

	/* -- Reset the second and remaining elements */
	for (i = 1 ; i < seg_num ; i++) {
		SET_TXBUFFER_DESC_LEN_WITH_OFFSET(txbd, i, 0);
		SET_TXBUFFER_DESC_AMSDU_WITH_OFFSET(txbd, i, 0);
		SET_TXBUFFER_DESC_ADD_LOW_WITH_OFFSET(txbd, i, 0);
	}

	/*
	 * Fill the buffer length of the first buffer.
	 * For 8821ce, TX_WIFI_INFO must be put in the first segment,
	 * and the size of the first segment cannot be larger than
	 * TX_WIFI_INFO_SIZE.
	 */
	SET_TX_BD_TX_BUFF_SIZE0(txbd, TX_WIFI_INFO_SIZE);
	SET_TX_BD_PSB(txbd, page_size_length);
	/* starting addr of TXDESC */
	SET_TX_BD_PHYSICAL_ADDR0_LOW(txbd, mapping);
#ifdef CONFIG_64BIT_DMA
	SET_TX_BD_PHYSICAL_ADDR0_HIGH(txbd, mapping >> 32);
#endif

	/*
	 * It is assumed that in the Linux implementation the packet is
	 * coalesced into only one buffer. Extension mode is not supported here.
	 */
	SET_TXBUFFER_DESC_LEN_WITH_OFFSET(txbd, 1, sz);
	/* don't use extension mode */
	SET_TXBUFFER_DESC_AMSDU_WITH_OFFSET(txbd, 1, 0);
	SET_TXBUFFER_DESC_ADD_LOW_WITH_OFFSET(txbd, 1,
					      mapping + TX_WIFI_INFO_SIZE); /* pkt */
#ifdef CONFIG_64BIT_DMA
	SET_TXBUFFER_DESC_ADD_HIGH_WITH_OFFSET(txbd, 1,
					       (mapping + TX_WIFI_INFO_SIZE) >> 32); /* pkt */
#endif

	/*buf_desc_debug("TX:%s, txbd = 0x%p\n", __FUNCTION__, txbd);*/
	buf_desc_debug("%s, txbd = 0x%08x\n", __func__, txbd);
	buf_desc_debug("TXBD:, 00h(0x%08x)\n", *((u32 *)(txbd)));
	buf_desc_debug("TXBD:, 04h(0x%08x)\n", *((u32 *)(txbd + 4)));
	buf_desc_debug("TXBD:, 08h(0x%08x)\n", *((u32 *)(txbd + 8)));
	buf_desc_debug("TXBD:, 12h(0x%08x)\n", *((u32 *)(txbd + 12)));
}

static s32 update_txdesc(struct xmit_frame *pxmitframe, s32 sz)
{
	uint qsel;
	u8 data_rate, pwr_status;
	_adapter *padapter = pxmitframe->padapter;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	HAL_DATA_TYPE *pHalData = GET_HAL_DATA(padapter);
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	u8 *ptxdesc;
	sint bmcst = IS_MCAST(pattrib->ra);
	u16 SWDefineContent = 0x0;
	u8 DriverFixedRate = 0x0;
	u8 hw_port = rtw_hal_get_port(padapter);

	ptxdesc = pxmitframe->buf_addr;
	_rtw_memset(ptxdesc, 0, TXDESC_SIZE);

	/* offset 0 */
	/*SET_TX_DESC_FIRST_SEG_8812(ptxdesc, 1);*/
	SET_TX_DESC_LS_8821C(ptxdesc, 1);
	/*SET_TX_DESC_OWN_8812(ptxdesc, 1);*/
	SET_TX_DESC_TXPKTSIZE_8821C(ptxdesc, sz);

	/* TX_DESC is not included in the data;
	 * the driver needs to fill in the TX_DESC with qsel=h2c.
	 * Offset in TX_DESC should be set to 0.
	 */
#ifdef CONFIG_TX_EARLY_MODE
	SET_TX_DESC_PKT_OFFSET_8812(ptxdesc, 1);
	if (pattrib->qsel == HALMAC_TXDESC_QSEL_H2C_CMD)
		SET_TX_DESC_OFFSET_8821C(ptxdesc, 0);
	else
		SET_TX_DESC_OFFSET_8821C(ptxdesc,
					 TXDESC_SIZE + EARLY_MODE_INFO_SIZE);
#else
	if (pattrib->qsel == HALMAC_TXDESC_QSEL_H2C_CMD)
		SET_TX_DESC_OFFSET_8821C(ptxdesc, 0);
	else
		SET_TX_DESC_OFFSET_8821C(ptxdesc, TXDESC_SIZE);
#endif

	if (bmcst)
		SET_TX_DESC_BMC_8821C(ptxdesc, 1);

	SET_TX_DESC_MACID_8821C(ptxdesc, pattrib->mac_id);
	SET_TX_DESC_RATE_ID_8821C(ptxdesc, pattrib->raid);
	SET_TX_DESC_QSEL_8821C(ptxdesc, pattrib->qsel);

	if (!pattrib->qos_en) {
		/* HW sets the sequence number */
		SET_TX_DESC_DISQSELSEQ_8821C(ptxdesc, 1);
		SET_TX_DESC_EN_HWSEQ_8821C(ptxdesc, 1);
		SET_TX_DESC_HW_SSN_SEL_8821C(ptxdesc, pattrib->hw_ssn_sel);
		SET_TX_DESC_EN_HWEXSEQ_8821C(ptxdesc, 0);
	} else
		SET_TX_DESC_SW_SEQ_8821C(ptxdesc, pattrib->seqnum);

	if ((pxmitframe->frame_tag & 0x0f) == DATA_FRAMETAG) {
		rtl8821c_fill_txdesc_sectype(pattrib, ptxdesc);
		rtl8821c_fill_txdesc_vcs(padapter, pattrib, ptxdesc);
#ifdef CONFIG_CONCURRENT_MODE
		if (bmcst)
			fill_txdesc_force_bmc_camid(pattrib, ptxdesc);
#endif
#ifdef CONFIG_SUPPORT_DYNAMIC_TXPWR
		rtw_phydm_set_dyntxpwr(padapter, ptxdesc, pattrib->mac_id);
#endif
		if ((pattrib->ether_type != 0x888e) &&
		    (pattrib->ether_type != 0x0806) &&
		    (pattrib->ether_type != 0x88b4) &&
		    (pattrib->dhcp_pkt != 1)
#ifdef CONFIG_AUTO_AP_MODE
		    && (pattrib->pctrl != _TRUE)
#endif
		   ) {
			/* Non EAP & ARP & DHCP type data packet */
			if (pattrib->ampdu_en == _TRUE) {
				/* 8821c does NOT support AGG broadcast pkt */
				if (!bmcst)
					SET_TX_DESC_AGG_EN_8821C(ptxdesc, 1);
				SET_TX_DESC_MAX_AGG_NUM_8821C(ptxdesc, 0x1f);
				/* Set A-MPDU aggregation. */
				SET_TX_DESC_AMPDU_DENSITY_8821C(ptxdesc,
								pattrib->ampdu_spacing);
			} else
				SET_TX_DESC_BK_8821C(ptxdesc, 1);

			rtl8821c_fill_txdesc_phy(padapter, pattrib, ptxdesc);

			/* DATA Rate FB LMT */
			/* For compatibility with MCC, use pmlmeext->cur_channel. */
			if (pmlmeext->cur_channel > 14)
				/* for 5G: OFDM 6M */
				SET_TX_DESC_DATA_RTY_LOWEST_RATE_8821C(
					ptxdesc, 4);
			else
				/* for 2.4G: CCK 1M */
				SET_TX_DESC_DATA_RTY_LOWEST_RATE_8821C(
					ptxdesc, 0);

			if (pHalData->fw_ractrl == _FALSE) {
				SET_TX_DESC_USE_RATE_8821C(ptxdesc, 1);
				DriverFixedRate = 0x01;

				if (pHalData->INIDATA_RATE[pattrib->mac_id] &
				    BIT(7))
					SET_TX_DESC_DATA_SHORT_8821C(
						ptxdesc, 1);

				SET_TX_DESC_DATARATE_8821C(ptxdesc,
					pHalData->INIDATA_RATE[pattrib->mac_id]
					& 0x7F);
			}

			if (bmcst) {
				DriverFixedRate = 0x01;
				fill_txdesc_bmc_tx_rate(pattrib, ptxdesc);
			}

			if (padapter->fix_rate != 0xFF) {
				/* modify data rate by iwpriv */
				SET_TX_DESC_USE_RATE_8821C(ptxdesc, 1);
				DriverFixedRate = 0x01;
				if (padapter->fix_rate & BIT(7))
					SET_TX_DESC_DATA_SHORT_8821C(
						ptxdesc, 1);

				SET_TX_DESC_DATARATE_8821C(ptxdesc,
					(padapter->fix_rate & 0x7F));
				if (!padapter->data_fb)
					SET_TX_DESC_DISDATAFB_8821C(ptxdesc, 1);
			}

			if (pattrib->ldpc)
				SET_TX_DESC_DATA_LDPC_8821C(ptxdesc, 1);
			if (pattrib->stbc)
				SET_TX_DESC_DATA_STBC_8821C(ptxdesc, 1);

#ifdef CONFIG_WMMPS_STA
			if (pattrib->trigger_frame)
				SET_TX_DESC_TRI_FRAME_8821C(ptxdesc, 1);
#endif /* CONFIG_WMMPS_STA */
		} else {
			/*
			 * EAP, ARP and DHCP data packets:
			 * use the 1M data rate to send them.
			 * This may make the handshake smoother.
			 */
			SET_TX_DESC_USE_RATE_8821C(ptxdesc, 1);
			DriverFixedRate = 0x01;
			SET_TX_DESC_BK_8821C(ptxdesc, 1);

			/* HW will ignore this setting if the transmission rate
			 * is legacy OFDM.
			 */
			if (pmlmeinfo->preamble_mode == PREAMBLE_SHORT)
				SET_TX_DESC_DATA_SHORT_8821C(ptxdesc, 1);

			SET_TX_DESC_DATARATE_8821C(ptxdesc,
						   MRateToHwRate(pmlmeext->tx_rate));
		}

#ifdef CONFIG_TDLS
#ifdef CONFIG_XMIT_ACK
		/* CCX-TXRPT ack for xmit mgmt frames. */
		if (pxmitframe->ack_report) {
			SET_TX_DESC_SPE_RPT_8821C(ptxdesc, 1);
#ifdef DBG_CCX
			RTW_INFO("%s set tx report\n", __func__);
#endif
		}
#endif /* CONFIG_XMIT_ACK */
#endif
	} else if ((pxmitframe->frame_tag & 0x0f) == MGNT_FRAMETAG) {
		SET_TX_DESC_MBSSID_8821C(ptxdesc, pattrib->mbssid & 0xF);
		SET_TX_DESC_USE_RATE_8821C(ptxdesc, 1);
		DriverFixedRate = 0x01;
		SET_TX_DESC_DATARATE_8821C(ptxdesc, MRateToHwRate(pattrib->rate));

		SET_TX_DESC_RTY_LMT_EN_8821C(ptxdesc, 1);
		if (pattrib->retry_ctrl == _TRUE)
			SET_TX_DESC_RTS_DATA_RTY_LMT_8821C(ptxdesc, 6);
		else
			SET_TX_DESC_RTS_DATA_RTY_LMT_8821C(ptxdesc, 12);

		/*rtl8821c_fill_txdesc_mgnt_bf(pxmitframe, ptxdesc); Todo for 8821C*/

#ifdef CONFIG_XMIT_ACK
		/* CCX-TXRPT ack for xmit mgmt frames. */
		if (pxmitframe->ack_report) {
			SET_TX_DESC_SPE_RPT_8821C(ptxdesc, 1);
#ifdef DBG_CCX
			RTW_INFO("%s set tx report\n", __func__);
#endif
		}
#endif /* CONFIG_XMIT_ACK */
	} else if ((pxmitframe->frame_tag & 0x0f) == TXAGG_FRAMETAG)
		RTW_INFO("pxmitframe->frame_tag == TXAGG_FRAMETAG\n");
#ifdef CONFIG_MP_INCLUDED
	else if (((pxmitframe->frame_tag & 0x0f) == MP_FRAMETAG) &&
		 (padapter->registrypriv.mp_mode == 1))
		fill_txdesc_for_mp(padapter, ptxdesc);
#endif
	else {
		RTW_INFO("pxmitframe->frame_tag = %d\n",
			 pxmitframe->frame_tag);

		SET_TX_DESC_USE_RATE_8821C(ptxdesc, 1);
		DriverFixedRate = 0x01;
		SET_TX_DESC_DATARATE_8821C(ptxdesc,
					   MRateToHwRate(pmlmeext->tx_rate));
	}

#ifdef CONFIG_ANTENNA_DIVERSITY
	ODM_SetTxAntByTxInfo(&pHalData->odmpriv, ptxdesc,
			     pxmitframe->attrib.mac_id);
#endif

	/*rtl8821c_fill_txdesc_bf(pxmitframe, ptxdesc); Todo for 8821C*/

	/*SET_TX_DESC_TX_BUFFER_SIZE_8812(ptxdesc, sz);*/

	if (DriverFixedRate)
		SWDefineContent |= 0x01;
	SET_TX_DESC_SW_DEFINE_8821C(ptxdesc, SWDefineContent);

	SET_TX_DESC_PORT_ID_8821C(ptxdesc, hw_port);
	SET_TX_DESC_MULTIPLE_PORT_8821C(ptxdesc, hw_port);

	rtl8821c_cal_txdesc_chksum(padapter, ptxdesc);
	rtl8821c_dbg_dump_tx_desc(padapter, pxmitframe->frame_tag, ptxdesc);

	return 0;
}

s32 rtl8821ce_dump_xframe(_adapter *padapter, struct xmit_frame *pxmitframe)
{
	s32 ret = _SUCCESS;
	s32 inner_ret = _SUCCESS;
	_irqL irqL;
	int t, sz, w_sz, pull = 0;
	u32 ff_hwaddr;
	struct xmit_buf *pxmitbuf = pxmitframe->pxmitbuf;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
	struct security_priv *psecuritypriv = &padapter->securitypriv;
	u8 *txbd;
	struct rtw_tx_ring *ptx_ring;

#ifdef CONFIG_80211N_HT
	if ((pxmitframe->frame_tag == DATA_FRAMETAG) &&
	    (pxmitframe->attrib.ether_type != 0x0806) &&
	    (pxmitframe->attrib.ether_type != 0x888e) &&
	    (pxmitframe->attrib.dhcp_pkt != 1))
		rtw_issue_addbareq_cmd(padapter, pxmitframe);
#endif /* CONFIG_80211N_HT */

	for (t = 0; t < pattrib->nr_frags; t++) {
		if (inner_ret != _SUCCESS && ret == _SUCCESS)
			ret = _FAIL;

		if (t != (pattrib->nr_frags - 1)) {
			sz = pxmitpriv->frag_len - 4;
			if (!psecuritypriv->sw_encrypt)
				sz -= pattrib->icv_len;
		} else {
			/* no frag */
			sz = pattrib->last_txcmdsz;
		}

		ff_hwaddr = rtw_get_ff_hwaddr(pxmitframe);

		_enter_critical(&pdvobjpriv->irq_th_lock, &irqL);
		txbd = get_txbd(GET_PRIMARY_ADAPTER(padapter), ff_hwaddr);
		ptx_ring = &(GET_PRIMARY_ADAPTER(padapter)->xmitpriv.tx_ring[ff_hwaddr]);
#ifndef CONFIG_BCN_ICF
		if (ff_hwaddr == BCN_QUEUE_INX)
			padapter->xmitpriv.beaconDMAing = _TRUE;
#endif

		if (txbd == NULL) {
			_exit_critical(&pdvobjpriv->irq_th_lock, &irqL);
			rtw_sctx_done_err(&pxmitbuf->sctx,
					  RTW_SCTX_DONE_TX_DESC_NA);
			rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
			RTW_INFO("##### Tx desc unavailable !#####\n");
			break;
		}

		if (pattrib->qsel != HALMAC_TXDESC_QSEL_H2C_CMD)
			update_txdesc(pxmitframe, sz);

		/* rtl8821ce_update_txbd() must be called after update_txdesc();
		 * the descriptor relies on rtl8821ce_update_txbd() to map the
		 * buffer into non-cached (DMA) memory.
		 */
		rtl8821ce_update_txbd(pxmitframe, txbd, sz);
		if (pxmitbuf->buf_tag != XMITBUF_CMD)
			rtl8821ce_enqueue_xmitbuf(ptx_ring, pxmitbuf);
		pxmitbuf->len = sz + TX_WIFI_INFO_SIZE;
		w_sz = sz;

		/* Ensure the descriptor and buffer writes above have completed
		 * before ownership is handed to the hardware below.
		 */
		wmb();
		fill_txbd_own(padapter, txbd, ff_hwaddr, ptx_ring);
#ifdef DBG_TXBD_DESC_DUMP
		if (pxmitpriv->dump_txbd_desc)
			rtw_tx_desc_backup(padapter, pxmitframe, TX_WIFI_INFO_SIZE, ff_hwaddr);
#endif
		_exit_critical(&pdvobjpriv->irq_th_lock, &irqL);

		inner_ret = rtw_write_port(padapter, ff_hwaddr, w_sz,
					   (unsigned char *)pxmitbuf);

		rtw_count_tx_stats(padapter, pxmitframe, sz);
	}

	rtw_free_xmitframe(pxmitpriv, pxmitframe);

	if (ret != _SUCCESS)
		rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_UNKNOWN);

	return ret;
}

/*
 * A packet should not be dequeued if there is no available descriptor.
 * Return: _TRUE if there is an available descriptor
 */
static u8 check_tx_desc_resource(_adapter *padapter, int prio)
{
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	struct rtw_tx_ring *ring;

	ring = &pxmitpriv->tx_ring[prio];

	/*
	 * For now we reserve two free descriptors as a safety boundary
	 * between the tail and the head.
	 */
	if ((ring->entries - ring->qlen) >= 2)
		return _TRUE;
	else
		return _FALSE;
}

static u8 check_nic_enough_desc_all(_adapter *padapter)
{
	u8 status = (check_tx_desc_resource(padapter, VI_QUEUE_INX) &&
		     check_tx_desc_resource(padapter, VO_QUEUE_INX) &&
		     check_tx_desc_resource(padapter, BE_QUEUE_INX) &&
		     check_tx_desc_resource(padapter, BK_QUEUE_INX) &&
		     check_tx_desc_resource(padapter, MGT_QUEUE_INX) &&
		     check_tx_desc_resource(padapter, TXCMD_QUEUE_INX) &&
		     check_tx_desc_resource(padapter, HIGH_QUEUE_INX));

	return status;
}
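
/*
 * The qsel-to-ring mapping below follows the usual WMM user-priority to
 * access-category grouping: UP 1/2 -> BK, UP 0/3 -> BE, UP 4/5 -> VI,
 * UP 6/7 -> VO (note added for clarity; the authoritative mapping is the
 * switch statement itself).
 */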

static u8 check_nic_enough_desc(_adapter *padapter, struct pkt_attrib *pattrib)
{
	u32 prio;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	struct rtw_tx_ring *ring;

	switch (pattrib->qsel) {
	case 0:
	case 3:
		prio = BE_QUEUE_INX;
		break;
	case 1:
	case 2:
		prio = BK_QUEUE_INX;
		break;
	case 4:
	case 5:
		prio = VI_QUEUE_INX;
		break;
	case 6:
	case 7:
		prio = VO_QUEUE_INX;
		break;
	default:
		prio = BE_QUEUE_INX;
		break;
	}

	ring = &pxmitpriv->tx_ring[prio];

	/*
	 * For now we reserve two free descriptors as a safety boundary
	 * between the tail and the head.
	 */
	if ((ring->entries - ring->qlen) >= 2)
		return _TRUE;
	else
		return _FALSE;
}

#ifdef CONFIG_XMIT_THREAD_MODE
/*
 * Description
 *	Transmit xmitbuf to the hardware tx fifo
 *
 * Return
 *	_SUCCESS	ok
 *	_FAIL		something went wrong
 */
s32 rtl8821ce_xmit_buf_handler(_adapter *padapter)
{
	PHAL_DATA_TYPE phal;
	struct xmit_priv *pxmitpriv;
	struct xmit_buf *pxmitbuf;
	struct xmit_frame *pxmitframe;
	s32 ret;

	phal = GET_HAL_DATA(padapter);
	pxmitpriv = &padapter->xmitpriv;

	ret = _rtw_down_sema(&pxmitpriv->xmit_sema);
	if (ret == _FAIL) {
		RTW_ERR("%s: down XmitBufSema fail!\n", __FUNCTION__);
		return _FAIL;
	}

	if (RTW_CANNOT_RUN(padapter)) {
		RTW_INFO("%s: bDriverStopped(%s) bSurpriseRemoved(%s)!\n"
			 , __func__
			 , rtw_is_drv_stopped(padapter) ? "True" : "False"
			 , rtw_is_surprise_removed(padapter) ? "True" : "False");
		return _FAIL;
	}

	if (check_pending_xmitbuf(pxmitpriv) == _FALSE)
		return _SUCCESS;

#ifdef CONFIG_LPS_LCLK
	ret = rtw_register_tx_alive(padapter);
	if (ret != _SUCCESS) {
		RTW_INFO("%s: wait to leave LPS_LCLK\n", __FUNCTION__);
		return _SUCCESS;
	}
#endif

	do {
		pxmitbuf = select_and_dequeue_pending_xmitbuf(padapter);
		if (pxmitbuf == NULL)
			break;

		pxmitframe = (struct xmit_frame *)pxmitbuf->priv_data;
		if (check_nic_enough_desc(padapter, &pxmitframe->attrib) == _FALSE) {
			enqueue_pending_xmitbuf_to_head(pxmitpriv, pxmitbuf);
			break;
		}

		rtl8821ce_dump_xframe(padapter, pxmitframe);
	} while (1);

	return _SUCCESS;
}
#endif

static s32 xmitframe_direct(_adapter *padapter, struct xmit_frame *pxmitframe)
{
#ifdef CONFIG_XMIT_THREAD_MODE
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
#endif
	s32 res = _SUCCESS;

	res = rtw_xmitframe_coalesce(padapter, pxmitframe->pkt, pxmitframe);
	if (res == _SUCCESS) {
#ifdef CONFIG_XMIT_THREAD_MODE
		enqueue_pending_xmitbuf(pxmitpriv, pxmitframe->pxmitbuf);
#else
		rtl8821ce_dump_xframe(padapter, pxmitframe);
#endif
	}

	return res;
}

#ifdef CONFIG_TX_AMSDU
static s32 xmitframe_amsdu_direct(_adapter *padapter, struct xmit_frame *pxmitframe)
{
	struct xmit_buf *pxmitbuf = pxmitframe->pxmitbuf;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	s32 res = _SUCCESS;

	res = rtw_xmitframe_coalesce_amsdu(padapter, pxmitframe, NULL);
	if (res == _SUCCESS) {
#ifdef CONFIG_XMIT_THREAD_MODE
		enqueue_pending_xmitbuf(pxmitpriv, pxmitframe->pxmitbuf);
#else
		res = rtl8821ce_dump_xframe(padapter, pxmitframe);
#endif
	} else {
		rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
		rtw_free_xmitframe(pxmitpriv, pxmitframe);
	}

	return res;
}
#endif
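
/*
 * Summary of the A-MSDU policy implemented in rtl8821ce_xmitframe_resume()
 * below (descriptive note): tx_amsdu == 0 disables A-MSDU; tx_amsdu == 1
 * sends each qualifying frame through the A-MSDU coalesce path one at a
 * time; tx_amsdu == 2 coalesces only when the current throughput exceeds
 * tx_amsdu_rate (or the rate check is disabled, tx_amsdu_rate == 0), using
 * a per-priority timer to wait briefly for a second frame when only one is
 * queued and coalescing two frames when more are queued.
 */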

void rtl8821ce_xmitframe_resume(_adapter *padapter)
{
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	struct xmit_frame *pxmitframe = NULL;
	struct xmit_buf *pxmitbuf = NULL;
	int res = _SUCCESS, xcnt = 0;
#ifdef CONFIG_TX_AMSDU
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
	int tx_amsdu = padapter->tx_amsdu;
	int tx_amsdu_rate = padapter->tx_amsdu_rate;
	int current_tx_rate = pdvobjpriv->traffic_stat.cur_tx_tp;
	struct pkt_attrib *pattrib = NULL;
	struct xmit_frame *pxmitframe_next = NULL;
	struct xmit_buf *pxmitbuf_next = NULL;
	struct pkt_attrib *pattrib_next = NULL;
	int num_frame = 0;
	u8 amsdu_timeout = 0;
#endif

	while (1) {
		if (RTW_CANNOT_RUN(padapter)) {
			RTW_INFO("%s => bDriverStopped or bSurpriseRemoved\n",
				 __func__);
			break;
		}

#ifndef CONFIG_XMIT_THREAD_MODE
		if (!check_nic_enough_desc_all(padapter))
			break;
#endif

		pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
		if (!pxmitbuf)
			break;

#ifdef CONFIG_TX_AMSDU
		if (tx_amsdu == 0)
			goto dump_pkt;

		if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE))
			goto dump_pkt;

		pxmitframe = rtw_get_xframe(pxmitpriv, &num_frame);
		if (num_frame == 0 || pxmitframe == NULL || !check_amsdu(pxmitframe))
			goto dump_pkt;

		pattrib = &pxmitframe->attrib;

		if (tx_amsdu == 1) {
			pxmitframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits,
							pxmitpriv->hwxmit_entry);
			if (pxmitframe) {
				pxmitframe->pxmitbuf = pxmitbuf;
				pxmitframe->buf_addr = pxmitbuf->pbuf;
				pxmitbuf->priv_data = pxmitframe;
				xmitframe_amsdu_direct(padapter, pxmitframe);
				pxmitpriv->amsdu_debug_coalesce_one++;
				continue;
			} else {
				rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
				break;
			}
		} else if (tx_amsdu == 2 && ((tx_amsdu_rate == 0) || (current_tx_rate > tx_amsdu_rate))) {
			if (num_frame == 1) {
				amsdu_timeout = rtw_amsdu_get_timer_status(padapter, pattrib->priority);

				if (amsdu_timeout == RTW_AMSDU_TIMER_UNSET) {
					rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
					rtw_amsdu_set_timer_status(padapter,
						pattrib->priority, RTW_AMSDU_TIMER_SETTING);
					rtw_amsdu_set_timer(padapter, pattrib->priority);
					pxmitpriv->amsdu_debug_set_timer++;
					break;
				} else if (amsdu_timeout == RTW_AMSDU_TIMER_SETTING) {
					rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
					break;
				} else if (amsdu_timeout == RTW_AMSDU_TIMER_TIMEOUT) {
					rtw_amsdu_set_timer_status(padapter,
						pattrib->priority, RTW_AMSDU_TIMER_UNSET);
					pxmitpriv->amsdu_debug_timeout++;

					pxmitframe = rtw_dequeue_xframe(pxmitpriv,
						pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
					if (pxmitframe) {
						pxmitframe->pxmitbuf = pxmitbuf;
						pxmitframe->buf_addr = pxmitbuf->pbuf;
						pxmitbuf->priv_data = pxmitframe;
						xmitframe_amsdu_direct(padapter, pxmitframe);
					} else {
						rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
					}
					break;
				}
			} else { /* num_frame > 1 */
				pxmitframe = rtw_dequeue_xframe(pxmitpriv,
					pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
				if (!pxmitframe) {
					rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
					break;
				}

				pxmitframe->pxmitbuf = pxmitbuf;
				pxmitframe->buf_addr = pxmitbuf->pbuf;
				pxmitbuf->priv_data = pxmitframe;

				pxmitframe_next = rtw_get_xframe(pxmitpriv, &num_frame);
				if (num_frame == 0) {
					xmitframe_amsdu_direct(padapter, pxmitframe);
					pxmitpriv->amsdu_debug_coalesce_one++;
					break;
				}

				if (!check_amsdu(pxmitframe_next)) {
					xmitframe_amsdu_direct(padapter, pxmitframe);
					pxmitpriv->amsdu_debug_coalesce_one++;
					continue;
				} else {
					pxmitbuf_next = rtw_alloc_xmitbuf(pxmitpriv);
					if (!pxmitbuf_next) {
						xmitframe_amsdu_direct(padapter, pxmitframe);
						pxmitpriv->amsdu_debug_coalesce_one++;
						continue;
					}

					pxmitframe_next = rtw_dequeue_xframe(pxmitpriv,
						pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
					if (!pxmitframe_next) {
						rtw_free_xmitbuf(pxmitpriv, pxmitbuf_next);
						xmitframe_amsdu_direct(padapter, pxmitframe);
						pxmitpriv->amsdu_debug_coalesce_one++;
						continue;
					}

					pxmitframe_next->pxmitbuf = pxmitbuf_next;
					pxmitframe_next->buf_addr = pxmitbuf_next->pbuf;
					pxmitbuf_next->priv_data = pxmitframe_next;

					rtw_xmitframe_coalesce_amsdu(padapter,
						pxmitframe_next, pxmitframe);
					rtw_free_xmitframe(pxmitpriv, pxmitframe);
					rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
#ifdef CONFIG_XMIT_THREAD_MODE
					enqueue_pending_xmitbuf(pxmitpriv, pxmitframe_next->pxmitbuf);
#else
					rtl8821ce_dump_xframe(padapter, pxmitframe_next);
#endif
					pxmitpriv->amsdu_debug_coalesce_two++;
					continue;
				}
			}
		}
dump_pkt:
#endif /* CONFIG_TX_AMSDU */
		pxmitframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits,
						pxmitpriv->hwxmit_entry);

		if (pxmitframe) {
			pxmitframe->pxmitbuf = pxmitbuf;
			pxmitframe->buf_addr = pxmitbuf->pbuf;
			pxmitbuf->priv_data = pxmitframe;

			if ((pxmitframe->frame_tag & 0x0f) == DATA_FRAMETAG) {
				if (pxmitframe->attrib.priority <= 15) {
					/* TID0~15 */
					res = rtw_xmitframe_coalesce(padapter,
						pxmitframe->pkt, pxmitframe);
				}

				/* always return ndis_packet after
				 * rtw_xmitframe_coalesce
				 */
				rtw_os_xmit_complete(padapter, pxmitframe);
			}

			if (res == _SUCCESS) {
#ifdef CONFIG_XMIT_THREAD_MODE
				enqueue_pending_xmitbuf(pxmitpriv, pxmitframe->pxmitbuf);
#else
				rtl8821ce_dump_xframe(padapter, pxmitframe);
#endif
			} else {
				rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
				rtw_free_xmitframe(pxmitpriv, pxmitframe);
			}

			xcnt++;
		} else {
			rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
			break;
		}
	}
}

/*
 * Return
 *	_TRUE	dump packet directly
 *	_FALSE	enqueue packet
 */
static s32 pre_xmitframe(_adapter *padapter, struct xmit_frame *pxmitframe)
{
	_irqL irqL;
	s32 res;
	struct xmit_buf *pxmitbuf = NULL;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
#ifdef CONFIG_TX_AMSDU
	int tx_amsdu = padapter->tx_amsdu;
	u8 amsdu_timeout = 0;
#endif

	_enter_critical_bh(&pxmitpriv->lock, &irqL);

	if (rtw_txframes_sta_ac_pending(padapter, pattrib) > 0)
		goto enqueue;

#ifndef CONFIG_XMIT_THREAD_MODE
	if (check_nic_enough_desc(padapter, pattrib) == _FALSE)
		goto enqueue;

	if (rtw_xmit_ac_blocked(padapter) == _TRUE)
		goto enqueue;
#endif

	if (DEV_STA_LG_NUM(padapter->dvobj))
		goto enqueue;

#ifdef CONFIG_TX_AMSDU
	if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) &&
	    check_amsdu_tx_support(padapter)) {
		if (IS_AMSDU_AMPDU_VALID(pattrib))
			goto enqueue;
	}
#endif

	pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
	if (pxmitbuf == NULL)
		goto enqueue;

	_exit_critical_bh(&pxmitpriv->lock, &irqL);

	pxmitframe->pxmitbuf = pxmitbuf;
	pxmitframe->buf_addr = pxmitbuf->pbuf;
	pxmitbuf->priv_data = pxmitframe;

	if (xmitframe_direct(padapter, pxmitframe) != _SUCCESS) {
		rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
		rtw_free_xmitframe(pxmitpriv, pxmitframe);
	}

	return _TRUE;

enqueue:
	res = rtw_xmitframe_enqueue(padapter, pxmitframe);

#ifdef CONFIG_TX_AMSDU
	if (res == _SUCCESS && tx_amsdu == 2) {
		amsdu_timeout = rtw_amsdu_get_timer_status(padapter, pattrib->priority);
		if (amsdu_timeout == RTW_AMSDU_TIMER_SETTING) {
			rtw_amsdu_cancel_timer(padapter, pattrib->priority);
			rtw_amsdu_set_timer_status(padapter, pattrib->priority,
						   RTW_AMSDU_TIMER_UNSET);
		}
	}
#endif

	_exit_critical_bh(&pxmitpriv->lock, &irqL);

	if (res != _SUCCESS) {
		rtw_free_xmitframe(pxmitpriv, pxmitframe);
		pxmitpriv->tx_drop++;
		return _TRUE;
	}

#ifdef CONFIG_TX_AMSDU
	tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
#endif

	return _FALSE;
}

s32 rtl8821ce_mgnt_xmit(_adapter *padapter, struct xmit_frame *pmgntframe)
{
#ifdef CONFIG_XMIT_THREAD_MODE
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	struct pkt_attrib *pattrib = &pmgntframe->attrib;
	s32 ret = _SUCCESS;

	/* For FW download, rsvd page and H2C pkt */
	if ((pattrib->qsel == QSLT_CMD) || (pattrib->qsel == QSLT_BEACON))
		ret = rtl8821ce_dump_xframe(padapter, pmgntframe);
	else
		enqueue_pending_xmitbuf(pxmitpriv, pmgntframe->pxmitbuf);

	return ret;
#else
	return rtl8821ce_dump_xframe(padapter, pmgntframe);
#endif
}

/*
 * Return
 *	_TRUE	packet dumped directly, ok
 *	_FALSE	packets temporarily cannot be transmitted to hardware
 */
s32 rtl8821ce_hal_xmit(_adapter *padapter, struct xmit_frame *pxmitframe)
{
	return pre_xmitframe(padapter, pxmitframe);
}

s32 rtl8821ce_hal_xmitframe_enqueue(_adapter *padapter,
				    struct xmit_frame *pxmitframe)
{
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	s32 err;

	err = rtw_xmitframe_enqueue(padapter, pxmitframe);
	if (err != _SUCCESS) {
		rtw_free_xmitframe(pxmitpriv, pxmitframe);
		pxmitpriv->tx_drop++;
	} else {
#ifdef PLATFORM_LINUX
		if (check_nic_enough_desc(padapter,
					  &pxmitframe->attrib) == _TRUE)
			tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
#endif
	}

	return err;
}
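
/*
 * Allocate and initialize one TX buffer-descriptor ring. The DMA-coherent
 * allocation is rejected below when its base address is not 256-byte aligned
 * ((unsigned long)txbd & 0xFF); the alignment requirement is implied by that
 * check (descriptive note, not taken from a datasheet).
 */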

int rtl8821ce_init_txbd_ring(_adapter *padapter, unsigned int q_idx,
			     unsigned int entries)
{
	struct xmit_priv *t_priv = &padapter->xmitpriv;
	struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
	struct pci_dev *pdev = pdvobjpriv->ppcidev;
	struct tx_buf_desc *txbd;
	u8 *tx_desc;
	dma_addr_t dma;
	int i;

	RTW_INFO("%s entries num:%d\n", __func__, entries);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 18, 0))
	txbd = pci_alloc_consistent(pdev, sizeof(*txbd) * entries, &dma);
#else
	txbd = dma_alloc_coherent(&pdev->dev, sizeof(*txbd) * entries, &dma, GFP_KERNEL);
#endif

	if (!txbd || (unsigned long)txbd & 0xFF) {
		RTW_INFO("Cannot allocate TXBD (q_idx = %d)\n", q_idx);
		return _FAIL;
	}

	_rtw_memset(txbd, 0, sizeof(*txbd) * entries);
	t_priv->tx_ring[q_idx].buf_desc = txbd;
	t_priv->tx_ring[q_idx].dma = dma;
	t_priv->tx_ring[q_idx].idx = 0;
	t_priv->tx_ring[q_idx].entries = entries;
	_rtw_init_queue(&t_priv->tx_ring[q_idx].queue);
	t_priv->tx_ring[q_idx].qlen = 0;

	RTW_INFO("%s queue:%d, ring_addr:%p\n", __func__, q_idx, txbd);

	return _SUCCESS;
}

void rtl8821ce_free_txbd_ring(_adapter *padapter, unsigned int prio)
{
	struct xmit_priv *t_priv = &padapter->xmitpriv;
	struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
	struct pci_dev *pdev = pdvobjpriv->ppcidev;
	struct rtw_tx_ring *ring = &t_priv->tx_ring[prio];
	u8 *txbd;
	struct xmit_buf *pxmitbuf;
	dma_addr_t mapping;

	while (ring->qlen) {
		txbd = (u8 *)(&ring->buf_desc[ring->idx]);
		SET_TX_BD_OWN(txbd, 0);

		if (prio != BCN_QUEUE_INX)
			ring->idx = (ring->idx + 1) % ring->entries;

		pxmitbuf = rtl8821ce_dequeue_xmitbuf(ring);
		if (pxmitbuf) {
			mapping = GET_TX_BD_PHYSICAL_ADDR0_LOW(txbd);
#ifdef CONFIG_64BIT_DMA
			mapping |= (dma_addr_t)GET_TX_BD_PHYSICAL_ADDR0_HIGH(txbd) << 32;
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 18, 0))
			pci_unmap_single(pdev,
					 mapping,
					 pxmitbuf->len, PCI_DMA_TODEVICE);
#else
			dma_unmap_single(&pdev->dev,
					 mapping,
					 pxmitbuf->len, DMA_TO_DEVICE);
#endif
			rtw_free_xmitbuf(t_priv, pxmitbuf);
		} else {
			RTW_INFO("%s qlen=%d!=0, but no xmitbuf in pendingQ\n",
				 __func__, ring->qlen);
			break;
		}
	}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 18, 0))
	pci_free_consistent(pdev, sizeof(*ring->buf_desc) * ring->entries,
			    ring->buf_desc, ring->dma);
#else
	dma_free_coherent(&pdev->dev, sizeof(*ring->buf_desc) * ring->entries,
			  ring->buf_desc, ring->dma);
#endif
	ring->buf_desc = NULL;
}

/*
 * Draw a line to show the queue status. For debugging.
 * i: queue index / W: HW index / h: host index / .: empty entry / *: ready to DMA
 * Example: R- 3- 4- 8 ..iW***h..... (i=3, W=4, h=8,
 * *** means 3 tx_desc are ready for DMA)
 */
#ifdef BUF_DESC_DEBUG
static void _draw_queue(PADAPTER Adapter, int prio)
{
	int i;
	u8 line[TX_BD_NUM_8821CE + 1];
	u16 hw, host;
	u32 index, tmp_4bytes = 0;
	struct xmit_priv *t_priv = &Adapter->xmitpriv;
	struct rtw_tx_ring *ring = &t_priv->tx_ring[prio];

	tmp_4bytes = rtw_read32(Adapter, get_txbd_rw_reg(prio));
	hw = (u16)((tmp_4bytes >> 16) & 0x7ff);
	host = (u16)(tmp_4bytes & 0x7ff);
	index = ring->idx;

	_rtw_memset(line, '.', TX_BD_NUM_8821CE);

	/* ready to return to driver */
	if (index <= hw) {
		for (i = index; i < hw; i++)
			line[i] = ':';
	} else { /* wrap */
		for (i = index; i < TX_BD_NUM_8821CE; i++)
			line[i] = ':';
		for (i = 0; i < hw; i++)
			line[i] = ':';
	}

	/* ready to dma */
	if (hw <= host) {
		for (i = hw; i < host; i++)
			line[i] = '*';
	} else { /* wrap */
		for (i = hw; i < TX_BD_NUM_8821CE; i++)
			line[i] = '*';
		for (i = 0; i < host; i++)
			line[i] = '*';
	}

	line[index] = 'i';	/* software queue index */
	line[host] = 'h';	/* host index */
	line[hw] = 'W';		/* hardware index */
	line[TX_BD_NUM_8821CE] = 0x0;

	/* Q2:10-20-30: */
	buf_desc_debug("Q%d:%02d-%02d-%02d %s\n", prio, index, hw, host, line);
}
#endif

/*
 * The read pointer is the h/w descriptor index.
 * The write pointer is the host descriptor index: on the tx side, if the own
 * bit is set for packet index n, the host pointer (write pointer) points to
 * index n + 1.
 */
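
/*
 * A descriptor at ring->idx is considered "closed" once the hardware read
 * pointer reported in the TXBD index register has advanced past it. To
 * avoid a register read per packet, the last value read is cached in
 * ring->hw_rp_cache and only refreshed when the software index catches up
 * with the cached value (descriptive note on the logic below).
 */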

static u32 rtl8821ce_check_txdesc_closed(PADAPTER Adapter, u32 queue_idx,
					 struct rtw_tx_ring *ring)
{
	/*
	 * hw_rp_cache is used to reduce REG access.
	 */
	u32 tmp32;

	/* bcn queue should not enter this function */
	if (queue_idx == BCN_QUEUE_INX)
		return _TRUE;

	/* qlen == 0 --> don't need to process */
	if (ring->qlen == 0)
		return _FALSE;

	/* sw_rp == hw_rp_cache --> sync hw_rp */
	if (ring->idx == ring->hw_rp_cache) {
		tmp32 = rtw_read32(Adapter, get_txbd_rw_reg(queue_idx));
		ring->hw_rp_cache = (tmp32 >> 16) & 0x0FFF;
	}

	/* check if need to handle TXOK */
	if (ring->idx == ring->hw_rp_cache)
		return _FALSE;

	return _TRUE;
}

#ifdef CONFIG_BCN_ICF
void rtl8821ce_tx_isr(PADAPTER Adapter, int prio)
{
	struct xmit_priv *t_priv = &Adapter->xmitpriv;
	struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(Adapter);
	struct rtw_tx_ring *ring = &t_priv->tx_ring[prio];
	struct xmit_buf *pxmitbuf;
	u8 *tx_desc;
	u16 tmp_4bytes;
	u16 desc_idx_hw = 0, desc_idx_host = 0;
	dma_addr_t mapping;
#ifdef CONFIG_LPS_LCLK
	int index;
	s32 enter32k = _SUCCESS;
	struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(Adapter);
#endif

	while (ring->qlen) {
		tx_desc = (u8 *)&ring->buf_desc[ring->idx];

		/* beacon uses the cmd buf and never runs into here */
		if (!rtl8821ce_check_txdesc_closed(Adapter, prio, ring))
			return;

		buf_desc_debug("TX: %s, q_idx = %d, tx_bd = %04x, close [%04x] r_idx [%04x]\n",
			       __func__, prio, (u32)tx_desc, ring->idx,
			       (ring->idx + 1) % ring->entries);

		ring->idx = (ring->idx + 1) % ring->entries;

		pxmitbuf = rtl8821ce_dequeue_xmitbuf(ring);
		if (pxmitbuf) {
			mapping = GET_TX_BD_PHYSICAL_ADDR0_LOW(tx_desc);
#ifdef CONFIG_64BIT_DMA
			mapping |= (dma_addr_t)GET_TX_BD_PHYSICAL_ADDR0_HIGH(tx_desc) << 32;
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 18, 0))
			pci_unmap_single(pdvobjpriv->ppcidev,
					 mapping,
					 pxmitbuf->len, PCI_DMA_TODEVICE);
#else
			dma_unmap_single(&(pdvobjpriv->ppcidev)->dev,
					 mapping,
					 pxmitbuf->len, DMA_TO_DEVICE);
#endif
			rtw_sctx_done(&pxmitbuf->sctx);
			rtw_free_xmitbuf(&(pxmitbuf->padapter->xmitpriv),
					 pxmitbuf);
		} else {
			RTW_INFO("%s qlen=%d!=0, but no xmitbuf in pendingQ\n",
				 __func__, ring->qlen);
		}
	}
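
	/*
	 * With the closed descriptors reclaimed above, 32K clock gating
	 * (LPS_LCLK) is only permitted when every TX ring except the beacon
	 * queue is empty (descriptive note on the check below).
	 */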

#ifdef CONFIG_LPS_LCLK
	for (index = 0; index < HW_QUEUE_ENTRY; index++) {
		if (index != BCN_QUEUE_INX) {
			if (_rtw_queue_empty(&(Adapter->xmitpriv.tx_ring[index].queue)) == _FALSE) {
				enter32k = _FAIL;
				break;
			}
		}
	}
	if (enter32k)
		_set_workitem(&(pwrpriv->dma_event));
#endif

	if (check_tx_desc_resource(Adapter, prio)
	    && rtw_xmit_ac_blocked(Adapter) != _TRUE)
		rtw_mi_xmit_tasklet_schedule(Adapter);
}

#else /* !CONFIG_BCN_ICF */
void rtl8821ce_tx_isr(PADAPTER Adapter, int prio)
{
	struct xmit_priv *t_priv = &Adapter->xmitpriv;
	struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(Adapter);
	struct rtw_tx_ring *ring = &t_priv->tx_ring[prio];
	struct xmit_buf *pxmitbuf;
	u8 *tx_desc;
	u16 tmp_4bytes;
	u16 desc_idx_hw = 0, desc_idx_host = 0;
#ifdef CONFIG_LPS_LCLK
	int index;
	s32 enter32k = _SUCCESS;
	struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(Adapter);
#endif

	while (ring->qlen) {
		tx_desc = (u8 *)&ring->buf_desc[ring->idx];

		/*
		 * A beacon packet only uses the first descriptor by default;
		 * check the register to see whether h/w has consumed the
		 * buffer descriptor.
		 */
		if (prio != BCN_QUEUE_INX) {
			if (!rtl8821ce_check_txdesc_closed(Adapter,
							   prio, ring))
				return;

			buf_desc_debug("TX: %s, queue_idx = %d, tx_desc = %04x, close desc [%04x] and update ring->idx to [%04x]\n",
				       __func__, prio, (u32)tx_desc, ring->idx,
				       (ring->idx + 1) % ring->entries);

			ring->idx = (ring->idx + 1) % ring->entries;
		}
#if 0 /* 8821c change 00[31] to DISQSELSEQ */
		else if (prio == BCN_QUEUE_INX)
			SET_TX_DESC_OWN_92E(tx_desc, 0);
#endif

		pxmitbuf = rtl8821ce_dequeue_xmitbuf(ring);
		if (pxmitbuf) {
			dma_addr_t mapping;

			mapping = GET_TX_BD_PHYSICAL_ADDR0_LOW(tx_desc);
#ifdef CONFIG_64BIT_DMA
			mapping |= (dma_addr_t)GET_TX_BD_PHYSICAL_ADDR0_HIGH(tx_desc) << 32;
#endif
			pci_unmap_single(pdvobjpriv->ppcidev,
					 mapping,
					 pxmitbuf->len, PCI_DMA_TODEVICE);
			rtw_sctx_done(&pxmitbuf->sctx);
			rtw_free_xmitbuf(&(pxmitbuf->padapter->xmitpriv),
					 pxmitbuf);
		} else {
			RTW_INFO("%s qlen=%d!=0, but no xmitbuf in pendingQ\n",
				 __func__, ring->qlen);
		}
	}

#ifdef CONFIG_LPS_LCLK
	for (index = 0; index < HW_QUEUE_ENTRY; index++) {
		if (index != BCN_QUEUE_INX) {
			if (_rtw_queue_empty(&(Adapter->xmitpriv.tx_ring[index].queue)) == _FALSE) {
				enter32k = _FAIL;
				break;
			}
		}
	}
	if (enter32k)
		_set_workitem(&(pwrpriv->dma_event));
#endif

	if ((prio != BCN_QUEUE_INX) && check_tx_desc_resource(Adapter, prio)
	    && rtw_xmit_ac_blocked(Adapter) != _TRUE)
		rtw_mi_xmit_tasklet_schedule(Adapter);
}
#endif /* CONFIG_BCN_ICF */

#ifdef CONFIG_HOSTAPD_MLME
static void rtl8812ae_hostap_mgnt_xmit_cb(struct urb *urb)
{
#ifdef PLATFORM_LINUX
	struct sk_buff *skb = (struct sk_buff *)urb->context;

	dev_kfree_skb_any(skb);
#endif
}

s32 rtl8821ce_hostap_mgnt_xmit_entry(_adapter *padapter, _pkt *pkt)
{
#ifdef PLATFORM_LINUX
	u16 fc;
	int rc, len, pipe;
	unsigned int bmcst, tid, qsel;
	struct sk_buff *skb, *pxmit_skb;
	struct urb *urb;
	unsigned char *pxmitbuf;
	struct tx_desc *ptxdesc;
	struct rtw_ieee80211_hdr *tx_hdr;
	struct hostapd_priv *phostapdpriv = padapter->phostapdpriv;
	struct net_device *pnetdev = padapter->pnetdev;
	HAL_DATA_TYPE *pHalData = GET_HAL_DATA(padapter);
	struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);

	skb = pkt;
	len = skb->len;
	tx_hdr = (struct rtw_ieee80211_hdr *)(skb->data);
	fc = le16_to_cpu(tx_hdr->frame_ctl);
	bmcst = IS_MCAST(tx_hdr->addr1);

	if ((fc & RTW_IEEE80211_FCTL_FTYPE) != RTW_IEEE80211_FTYPE_MGMT)
		goto _exit;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
	/* http://www.mail-archive.com/netdev@vger.kernel.org/msg17214.html */
	pxmit_skb = dev_alloc_skb(len + TXDESC_SIZE);
#else
	pxmit_skb = netdev_alloc_skb(pnetdev, len + TXDESC_SIZE);
#endif
	if (!pxmit_skb)
		goto _exit;

	pxmitbuf = pxmit_skb->data;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto _exit;

	/* ----- fill tx desc ----- */
	ptxdesc = (struct tx_desc *)pxmitbuf;
	_rtw_memset(ptxdesc, 0, sizeof(*ptxdesc));

	/* offset 0 */
	ptxdesc->txdw0 |= cpu_to_le32(len & 0x0000ffff);
	/* default = 32 bytes for TX Desc */
	ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE + OFFSET_SZ) << OFFSET_SHT) &
				      0x00ff0000);
	ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);

	if (bmcst)
		ptxdesc->txdw0 |= cpu_to_le32(BIT(24));

	/* offset 4 */
	ptxdesc->txdw1 |= cpu_to_le32(0x00);	/* MAC_ID */
	ptxdesc->txdw1 |= cpu_to_le32((0x12 << QSEL_SHT) & 0x00001f00);
	ptxdesc->txdw1 |= cpu_to_le32((0x06 << 16) & 0x000f0000);	/* b mode */

	/* offset 8 */

	/* offset 12 */
	ptxdesc->txdw3 |= cpu_to_le32((le16_to_cpu(tx_hdr->seq_ctl) << 16) &
				      0xffff0000);

	/* offset 16 */
	ptxdesc->txdw4 |= cpu_to_le32(BIT(8));	/* driver uses rate */

	/* offset 20 */

	rtl8188e_cal_txdesc_chksum(ptxdesc);
	/* ----- end of fill tx desc ----- */

	skb_put(pxmit_skb, len + TXDESC_SIZE);
	pxmitbuf = pxmitbuf + TXDESC_SIZE;
	_rtw_memcpy(pxmitbuf, skb->data, len);

	/* ----- prepare urb for submit ----- */
	/* translate DMA FIFO addr to pipehandle */
	/*pipe = ffaddr2pipehdl(pdvobj, MGT_QUEUE_INX);*/
	pipe = usb_sndbulkpipe(pdvobj->pusbdev,
			       pHalData->Queue2EPNum[(u8)MGT_QUEUE_INX] & 0x0f);
	usb_fill_bulk_urb(urb, pdvobj->pusbdev, pipe, pxmit_skb->data,
			  pxmit_skb->len, rtl8812ae_hostap_mgnt_xmit_cb, pxmit_skb);
	urb->transfer_flags |= URB_ZERO_PACKET;
	usb_anchor_urb(urb, &phostapdpriv->anchored);
	rc = usb_submit_urb(urb, GFP_ATOMIC);
	if (rc < 0) {
		usb_unanchor_urb(urb);
		kfree_skb(skb);
	}
	usb_free_urb(urb);

_exit:
	dev_kfree_skb_any(skb);
#endif
	return 0;
}
#endif