diff options
author | Chris Manton <cmanton@google.com> | 2014-05-06 10:35:42 -0700 |
---|---|---|
committer | Andre Eisenbach <eisenbach@google.com> | 2015-03-16 16:51:29 -0700 |
commit | fe7216ca12f91baae733e7c93063db73121af308 (patch) | |
tree | 03a66f188e1fd2e8daa48d5be04220b80422d7bf /stack | |
parent | 284440f0c9f9fe15b162e37ef2bf6af439407447 (diff) | |
download | android_system_bt-fe7216ca12f91baae733e7c93063db73121af308.tar.gz android_system_bt-fe7216ca12f91baae733e7c93063db73121af308.tar.bz2 android_system_bt-fe7216ca12f91baae733e7c93063db73121af308.zip |
Enforce GKI API buffer usage
Also add another API GKI_queue_length(BUFFER_Q *)
Diffstat (limited to 'stack')
-rw-r--r-- | stack/avct/avct_lcb_act.c | 2 | ||||
-rw-r--r-- | stack/avdt/avdt_scb_act.c | 2 | ||||
-rw-r--r-- | stack/bnep/bnep_api.c | 6 | ||||
-rw-r--r-- | stack/bnep/bnep_utils.c | 4 | ||||
-rw-r--r-- | stack/btm/btm_ble_bgconn.c | 2 | ||||
-rw-r--r-- | stack/btm/btm_pm.c | 2 | ||||
-rw-r--r-- | stack/btu/btu_hcif.c | 4 | ||||
-rw-r--r-- | stack/btu/btu_task.c | 4 | ||||
-rw-r--r-- | stack/gap/gap_conn.c | 4 | ||||
-rw-r--r-- | stack/gatt/gatt_auth.c | 4 | ||||
-rw-r--r-- | stack/gatt/gatt_db.c | 2 | ||||
-rw-r--r-- | stack/gatt/gatt_main.c | 2 | ||||
-rwxr-xr-x | stack/gatt/gatt_sr.c | 6 | ||||
-rw-r--r-- | stack/gatt/gatt_utils.c | 10 | ||||
-rw-r--r-- | stack/l2cap/l2c_api.c | 12 | ||||
-rw-r--r-- | stack/l2cap/l2c_ble.c | 2 | ||||
-rw-r--r-- | stack/l2cap/l2c_csm.c | 4 | ||||
-rw-r--r-- | stack/l2cap/l2c_fcr.c | 66 | ||||
-rw-r--r-- | stack/l2cap/l2c_link.c | 8 | ||||
-rwxr-xr-x | stack/l2cap/l2c_main.c | 12 | ||||
-rw-r--r-- | stack/l2cap/l2c_utils.c | 22 | ||||
-rw-r--r-- | stack/rfcomm/port_api.c | 14 | ||||
-rw-r--r-- | stack/rfcomm/port_rfc.c | 2 | ||||
-rw-r--r-- | stack/rfcomm/port_utils.c | 8 | ||||
-rw-r--r-- | stack/rfcomm/rfc_port_fsm.c | 2 |
25 files changed, 105 insertions(+), 101 deletions(-)
diff --git a/stack/avct/avct_lcb_act.c b/stack/avct/avct_lcb_act.c index 17d5d3d30..4ca1745a1 100644 --- a/stack/avct/avct_lcb_act.c +++ b/stack/avct/avct_lcb_act.c @@ -618,7 +618,7 @@ void avct_lcb_send_msg(tAVCT_LCB *p_lcb, tAVCT_LCB_EVT *p_data) pkt_type = AVCT_PKT_TYPE_END; } } - AVCT_TRACE_DEBUG ("avct_lcb_send_msg tx_q_count:%d", p_lcb->tx_q.count); + AVCT_TRACE_DEBUG ("avct_lcb_send_msg tx_q_count:%d", GKI_queue_length(&p_lcb->tx_q)); return; } diff --git a/stack/avdt/avdt_scb_act.c b/stack/avdt/avdt_scb_act.c index 72dd3cd46..8c46f7b69 100644 --- a/stack/avdt/avdt_scb_act.c +++ b/stack/avdt/avdt_scb_act.c @@ -1421,7 +1421,7 @@ void avdt_scb_snd_stream_close(tAVDT_SCB *p_scb, tAVDT_SCB_EVT *p_data) BT_HDR *p_frag; AVDT_TRACE_WARNING("avdt_scb_snd_stream_close c:%d, off:%d", - p_scb->frag_q.count, p_scb->frag_off); + GKI_queue_length(&p_scb->frag_q), p_scb->frag_off); /* clean fragments queue */ while((p_frag = (BT_HDR*)GKI_dequeue (&p_scb->frag_q)) != NULL) GKI_freebuf(p_frag); diff --git a/stack/bnep/bnep_api.c b/stack/bnep/bnep_api.c index b1d8c7c2d..87b2cb5cf 100644 --- a/stack/bnep/bnep_api.c +++ b/stack/bnep/bnep_api.c @@ -432,7 +432,7 @@ tBNEP_RESULT BNEP_WriteBuf (UINT16 handle, } /* Check transmit queue */ - if (p_bcb->xmit_q.count >= BNEP_MAX_XMITQ_DEPTH) + if (GKI_queue_length(&p_bcb->xmit_q) >= BNEP_MAX_XMITQ_DEPTH) { GKI_freebuf (p_buf); return (BNEP_Q_SIZE_EXCEEDED); @@ -538,7 +538,7 @@ tBNEP_RESULT BNEP_Write (UINT16 handle, } /* Check transmit queue */ - if (p_bcb->xmit_q.count >= BNEP_MAX_XMITQ_DEPTH) + if (GKI_queue_length(&p_bcb->xmit_q) >= BNEP_MAX_XMITQ_DEPTH) return (BNEP_Q_SIZE_EXCEEDED); /* Get a buffer to copy teh data into */ @@ -762,7 +762,7 @@ tBNEP_RESULT BNEP_GetStatus (UINT16 handle, tBNEP_STATUS *p_status) p_status->con_status = BNEP_STATUS_CONNECTED; p_status->l2cap_cid = p_bcb->l2cap_cid; p_status->rem_mtu_size = p_bcb->rem_mtu_size; - p_status->xmit_q_depth = p_bcb->xmit_q.count; + p_status->xmit_q_depth = 
GKI_queue_length(&p_bcb->xmit_q); p_status->sent_num_filters = p_bcb->sent_num_filters; p_status->sent_mcast_filters = p_bcb->sent_mcast_filters; p_status->rcvd_num_filters = p_bcb->rcvd_num_filters; diff --git a/stack/bnep/bnep_utils.c b/stack/bnep/bnep_utils.c index 92061d010..89c471cbc 100644 --- a/stack/bnep/bnep_utils.c +++ b/stack/bnep/bnep_utils.c @@ -152,7 +152,7 @@ void bnepu_release_bcb (tBNEP_CONN *p_bcb) p_bcb->p_pending_data = NULL; /* Free transmit queue */ - while (p_bcb->xmit_q.count) + while (!GKI_queue_is_empty(&p_bcb->xmit_q)) { GKI_freebuf (GKI_dequeue (&p_bcb->xmit_q)); } @@ -455,7 +455,7 @@ void bnepu_check_send_packet (tBNEP_CONN *p_bcb, BT_HDR *p_buf) BNEP_TRACE_EVENT ("BNEP - bnepu_check_send_packet for CID: 0x%x", p_bcb->l2cap_cid); if (p_bcb->con_flags & BNEP_FLAGS_L2CAP_CONGESTED) { - if (p_bcb->xmit_q.count >= BNEP_MAX_XMITQ_DEPTH) + if (GKI_queue_length(&p_bcb->xmit_q) >= BNEP_MAX_XMITQ_DEPTH) { BNEP_TRACE_EVENT ("BNEP - congested, dropping buf, CID: 0x%x", p_bcb->l2cap_cid); diff --git a/stack/btm/btm_ble_bgconn.c b/stack/btm/btm_ble_bgconn.c index b33aa2e2c..2fabe23d6 100644 --- a/stack/btm/btm_ble_bgconn.c +++ b/stack/btm/btm_ble_bgconn.c @@ -705,7 +705,7 @@ BOOLEAN btm_send_pending_direct_conn(void ) tBTM_BLE_CONN_REQ *p_req; BOOLEAN rt = FALSE; - if ( btm_cb.ble_ctr_cb.conn_pending_q.count ) + if (!GKI_queue_is_empty(&btm_cb.ble_ctr_cb.conn_pending_q)) { p_req = (tBTM_BLE_CONN_REQ*)GKI_dequeue (&btm_cb.ble_ctr_cb.conn_pending_q); diff --git a/stack/btm/btm_pm.c b/stack/btm/btm_pm.c index 76bfc048b..446c88dfc 100644 --- a/stack/btm/btm_pm.c +++ b/stack/btm/btm_pm.c @@ -1053,7 +1053,7 @@ BOOLEAN btm_pm_device_in_scan_state(void) /* Scan state-paging, inquiry, and trying to connect */ /* Check for paging */ - if (btm_cb.is_paging || btm_cb.page_queue.count > 0 || + if (btm_cb.is_paging || GKI_queue_length(&btm_cb.page_queue) > 0 || BTM_BL_PAGING_STARTED == btm_cb.busy_level) { BTM_TRACE_DEBUG("btm_pm_device_in_scan_state- paging"); 
diff --git a/stack/btu/btu_hcif.c b/stack/btu/btu_hcif.c index 9ec8dd3cb..aea9800fc 100644 --- a/stack/btu/btu_hcif.c +++ b/stack/btu/btu_hcif.c @@ -460,7 +460,7 @@ void btu_hcif_send_cmd (UINT8 controller_id, BT_HDR *p_buf) #endif /* If there are already commands in the queue, then enqueue this command */ - if ((p_buf) && (p_hci_cmd_cb->cmd_xmit_q.count)) + if ((p_buf) && (!GKI_queue_is_empty(&p_hci_cmd_cb->cmd_xmit_q))) { GKI_enqueue (&(p_hci_cmd_cb->cmd_xmit_q), p_buf); p_buf = NULL; @@ -471,7 +471,7 @@ void btu_hcif_send_cmd (UINT8 controller_id, BT_HDR *p_buf) && (p_hci_cmd_cb->cmd_window == 0) && (btm_cb.devcb.state == BTM_DEV_STATE_WAIT_RESET_CMPLT)) ) { - p_hci_cmd_cb->cmd_window = p_hci_cmd_cb->cmd_xmit_q.count + 1; + p_hci_cmd_cb->cmd_window = GKI_queue_length(&p_hci_cmd_cb->cmd_xmit_q) + 1; } /* See if we can send anything */ diff --git a/stack/btu/btu_task.c b/stack/btu/btu_task.c index 8d1fd04df..9045dac7e 100644 --- a/stack/btu/btu_task.c +++ b/stack/btu/btu_task.c @@ -921,8 +921,8 @@ void btu_stop_timer_oneshot(TIMER_LIST_ENT *p_tle) { *******************************************************************************/ void btu_check_bt_sleep (void) { - if ((btu_cb.hci_cmd_cb[LOCAL_BR_EDR_CONTROLLER_ID].cmd_cmpl_q.count == 0) - &&(btu_cb.hci_cmd_cb[LOCAL_BR_EDR_CONTROLLER_ID].cmd_xmit_q.count == 0)) + if ((GKI_queue_is_empty(&btu_cb.hci_cmd_cb[LOCAL_BR_EDR_CONTROLLER_ID].cmd_cmpl_q) + && GKI_queue_is_empty(&btu_cb.hci_cmd_cb[LOCAL_BR_EDR_CONTROLLER_ID].cmd_xmit_q))) { if (l2cb.controller_xmit_window == l2cb.num_lm_acl_bufs) { diff --git a/stack/gap/gap_conn.c b/stack/gap/gap_conn.c index d1e96d88b..61f88928b 100644 --- a/stack/gap/gap_conn.c +++ b/stack/gap/gap_conn.c @@ -1173,10 +1173,10 @@ static void gap_release_ccb (tGAP_CCB *p_ccb) /* Drop any buffers we may be holding */ p_ccb->rx_queue_size = 0; - while (p_ccb->rx_queue.p_first) + while (!GKI_queue_is_empty(&p_ccb->rx_queue)) GKI_freebuf (GKI_dequeue (&p_ccb->rx_queue)); - while 
(p_ccb->tx_queue.p_first) + while (!GKI_queue_is_empty(&p_ccb->tx_queue)) GKI_freebuf (GKI_dequeue (&p_ccb->tx_queue)); p_ccb->con_state = GAP_CCB_STATE_IDLE; diff --git a/stack/gatt/gatt_auth.c b/stack/gatt/gatt_auth.c index 10cf76e88..ae5214592 100644 --- a/stack/gatt/gatt_auth.c +++ b/stack/gatt/gatt_auth.c @@ -192,7 +192,7 @@ void gatt_enc_cmpl_cback(BD_ADDR bd_addr, tBT_TRANSPORT transport, void *p_ref_d gatt_sec_check_complete(status , p_buf->p_clcb, p_tcb->sec_act); GKI_freebuf(p_buf); /* start all other pending operation in queue */ - count = p_tcb->pending_enc_clcb.count; + count = GKI_queue_length(&p_tcb->pending_enc_clcb); for (; count > 0; count --) { if ((p_buf = (tGATT_PENDING_ENC_CLCB *)GKI_dequeue (&p_tcb->pending_enc_clcb)) != NULL) @@ -246,7 +246,7 @@ void gatt_notify_enc_cmpl(BD_ADDR bd_addr) { gatt_set_sec_act(p_tcb, GATT_SEC_NONE); - count = p_tcb->pending_enc_clcb.count; + count = GKI_queue_length(&p_tcb->pending_enc_clcb); for (; count > 0; count --) { diff --git a/stack/gatt/gatt_db.c b/stack/gatt/gatt_db.c index b7887c975..b12ff5235 100644 --- a/stack/gatt/gatt_db.c +++ b/stack/gatt/gatt_db.c @@ -62,6 +62,8 @@ static tGATT_STATUS gatts_send_app_read_request(tGATT_TCB *p_tcb, UINT8 op_code, BOOLEAN gatts_init_service_db (tGATT_SVC_DB *p_db, tBT_UUID *p_service, BOOLEAN is_pri, UINT16 s_hdl, UINT16 num_handle) { + GKI_init_q(&p_db->svc_buffer); + if (!allocate_svc_db_buf(p_db)) { GATT_TRACE_ERROR("gatts_init_service_db failed, no resources"); diff --git a/stack/gatt/gatt_main.c b/stack/gatt/gatt_main.c index 3d9603514..f0a0229ec 100644 --- a/stack/gatt/gatt_main.c +++ b/stack/gatt/gatt_main.c @@ -102,6 +102,8 @@ void gatt_init (void) #endif gatt_cb.def_mtu_size = GATT_DEF_BLE_MTU_SIZE; GKI_init_q (&gatt_cb.sign_op_queue); + GKI_init_q (&gatt_cb.srv_chg_clt_q); + GKI_init_q (&gatt_cb.pending_new_srv_start_q); /* First, register fixed L2CAP channel for ATT over BLE */ fixed_reg.fixed_chnl_opts.mode = L2CAP_FCR_BASIC_MODE; 
fixed_reg.fixed_chnl_opts.max_transmit = 0xFF; diff --git a/stack/gatt/gatt_sr.c b/stack/gatt/gatt_sr.c index 21997946b..5f9ddf01b 100755 --- a/stack/gatt/gatt_sr.c +++ b/stack/gatt/gatt_sr.c @@ -106,7 +106,7 @@ void gatt_dequeue_sr_cmd (tGATT_TCB *p_tcb) GKI_freebuf (p_tcb->sr_cmd.p_rsp_msg); } - while (p_tcb->sr_cmd.multi_rsp_q.p_first) + while (GKI_getfirst(&p_tcb->sr_cmd.multi_rsp_q)) GKI_freebuf (GKI_dequeue (&p_tcb->sr_cmd.multi_rsp_q)); memset( &p_tcb->sr_cmd, 0, sizeof(tGATT_SR_CMD)); } @@ -145,9 +145,9 @@ static BOOLEAN process_read_multi_rsp (tGATT_SR_CMD *p_cmd, tGATT_STATUS status, if (status == GATT_SUCCESS) { GATT_TRACE_DEBUG ("Multi read count=%d num_hdls=%d", - p_cmd->multi_rsp_q.count, p_cmd->multi_req.num_handles); + GKI_queue_length(&p_cmd->multi_rsp_q), p_cmd->multi_req.num_handles); /* Wait till we get all the responses */ - if (p_cmd->multi_rsp_q.count == p_cmd->multi_req.num_handles) + if (GKI_queue_length(&p_cmd->multi_rsp_q) == p_cmd->multi_req.num_handles) { len = sizeof(BT_HDR) + L2CAP_MIN_OFFSET + mtu; if ((p_buf = (BT_HDR *)GKI_getbuf(len)) == NULL) diff --git a/stack/gatt/gatt_utils.c b/stack/gatt/gatt_utils.c index 0e841a92c..f0658ea2a 100644 --- a/stack/gatt/gatt_utils.c +++ b/stack/gatt/gatt_utils.c @@ -93,7 +93,7 @@ void gatt_free_pending_ind(tGATT_TCB *p_tcb) { GATT_TRACE_DEBUG("gatt_free_pending_ind"); /* release all queued indications */ - while (p_tcb->pending_ind_q.p_first) + while (!GKI_queue_is_empty(&p_tcb->pending_ind_q)) GKI_freebuf (GKI_dequeue (&p_tcb->pending_ind_q)); } @@ -110,7 +110,7 @@ void gatt_free_pending_enc_queue(tGATT_TCB *p_tcb) { GATT_TRACE_DEBUG("gatt_free_pending_enc_queue"); /* release all queued indications */ - while (p_tcb->pending_enc_clcb.p_first) + while (!GKI_queue_is_empty(&p_tcb->pending_enc_clcb)) GKI_freebuf (GKI_dequeue (&p_tcb->pending_enc_clcb)); } @@ -373,7 +373,7 @@ void gatt_free_hdl_buffer(tGATT_HDL_LIST_ELEM *p) if (p) { - while (p->svc_db.svc_buffer.p_first) + while 
(!GKI_queue_is_empty(&p->svc_db.svc_buffer)) GKI_freebuf (GKI_dequeue (&p->svc_db.svc_buffer)); memset(p, 0, sizeof(tGATT_HDL_LIST_ELEM)); } @@ -397,7 +397,7 @@ void gatt_free_srvc_db_buffer_app_id(tBT_UUID *p_app_id) { if (memcmp(p_app_id, &p_elem->asgn_range.app_uuid128, sizeof(tBT_UUID)) == 0) { - while (p_elem->svc_db.svc_buffer.p_first) + while (!GKI_queue_is_empty(&p_elem->svc_db.svc_buffer)) GKI_freebuf (GKI_dequeue (&p_elem->svc_db.svc_buffer)); p_elem->svc_db.mem_free = 0; @@ -1363,7 +1363,7 @@ UINT8 gatt_sr_alloc_rcb(tGATT_HDL_LIST_ELEM *p_list ) p_sreg->e_hdl = p_list->asgn_range.e_handle; p_sreg->p_db = &p_list->svc_db; - GATT_TRACE_DEBUG ("total GKI buffer in db [%d]",p_sreg->p_db->svc_buffer.count); + GATT_TRACE_DEBUG ("total GKI buffer in db [%d]",GKI_queue_length(&p_sreg->p_db->svc_buffer)); break; } } diff --git a/stack/l2cap/l2c_api.c b/stack/l2cap/l2c_api.c index 3d228d8c0..be7829ce8 100644 --- a/stack/l2cap/l2c_api.c +++ b/stack/l2cap/l2c_api.c @@ -1484,7 +1484,7 @@ UINT16 L2CA_SendFixedChnlData (UINT16 fixed_cid, BD_ADDR rem_bda, BT_HDR *p_buf) { L2CAP_TRACE_ERROR ("L2CAP - CID: 0x%04x cannot send, already congested \ xmit_hold_q.count: %u buff_quota: %u", fixed_cid, - p_lcb->p_fixed_ccbs[fixed_cid - L2CAP_FIRST_FIXED_CHNL]->xmit_hold_q.count, + GKI_queue_length(&p_lcb->p_fixed_ccbs[fixed_cid - L2CAP_FIRST_FIXED_CHNL]->xmit_hold_q), p_lcb->p_fixed_ccbs[fixed_cid - L2CAP_FIRST_FIXED_CHNL]->buff_quota); GKI_freebuf (p_buf); return (L2CAP_DW_FAILED); @@ -1810,7 +1810,7 @@ UINT16 L2CA_FlushChannel (UINT16 lcid, UINT16 num_to_flush) if (num_to_flush != L2CAP_FLUSH_CHANS_GET) { L2CAP_TRACE_API ("L2CA_FlushChannel (FLUSH) CID: 0x%04x NumToFlush: %d QC: %u pFirst: 0x%08x", - lcid, num_to_flush, p_ccb->xmit_hold_q.count, p_ccb->xmit_hold_q.p_first); + lcid, num_to_flush, GKI_queue_length(&p_ccb->xmit_hold_q), GKI_getfirst(&p_ccb->xmit_hold_q)); } else { @@ -1838,7 +1838,7 @@ UINT16 L2CA_FlushChannel (UINT16 lcid, UINT16 num_to_flush) } #endif - p_buf = 
(BT_HDR *)p_lcb->link_xmit_data_q.p_first; + p_buf = (BT_HDR *)GKI_getfirst(&p_lcb->link_xmit_data_q); /* First flush the number we are asked to flush */ while ((p_buf != NULL) && (num_to_flush != 0)) @@ -1860,7 +1860,7 @@ UINT16 L2CA_FlushChannel (UINT16 lcid, UINT16 num_to_flush) } /* If needed, flush buffers in the CCB xmit hold queue */ - while ( (num_to_flush != 0) && (p_ccb->xmit_hold_q.count != 0) ) + while ( (num_to_flush != 0) && (!GKI_queue_is_empty(&p_ccb->xmit_hold_q))) { p_buf = (BT_HDR *)GKI_dequeue (&p_ccb->xmit_hold_q); if (p_buf) @@ -1874,7 +1874,7 @@ UINT16 L2CA_FlushChannel (UINT16 lcid, UINT16 num_to_flush) (*p_ccb->p_rcb->api.pL2CA_TxComplete_Cb)(p_ccb->local_cid, num_flushed2); /* Now count how many are left */ - p_buf = (BT_HDR *)p_lcb->link_xmit_data_q.p_first; + p_buf = (BT_HDR *)GKI_getfirst(&p_lcb->link_xmit_data_q); while (p_buf != NULL) { @@ -1885,7 +1885,7 @@ UINT16 L2CA_FlushChannel (UINT16 lcid, UINT16 num_to_flush) } /* Add in the number in the CCB xmit queue */ - num_left += p_ccb->xmit_hold_q.count; + num_left += GKI_queue_length(&p_ccb->xmit_hold_q); /* Return the local number of buffers left for the CID */ L2CAP_TRACE_DEBUG ("L2CA_FlushChannel() flushed: %u + %u, num_left: %u", num_flushed1, num_flushed2, num_left); diff --git a/stack/l2cap/l2c_ble.c b/stack/l2cap/l2c_ble.c index d3ba6e229..f9c75e2c6 100644 --- a/stack/l2cap/l2c_ble.c +++ b/stack/l2cap/l2c_ble.c @@ -915,7 +915,7 @@ void l2c_ble_link_adjust_allocation (void) /* this link may have sent anything but some other link sent packets so */ /* so we may need a timer to kick off this link's transmissions. 
*/ if ( (p_lcb->link_state == LST_CONNECTED) - && (p_lcb->link_xmit_data_q.count) + && (GKI_queue_length(&p_lcb->link_xmit_data_q)) && (p_lcb->sent_not_acked < p_lcb->link_xmit_quota) ) btu_start_timer (&p_lcb->timer_entry, BTU_TTYPE_L2CAP_LINK, L2CAP_LINK_FLOW_CONTROL_TOUT); } diff --git a/stack/l2cap/l2c_csm.c b/stack/l2cap/l2c_csm.c index fa261b211..5bf268fc6 100644 --- a/stack/l2cap/l2c_csm.c +++ b/stack/l2cap/l2c_csm.c @@ -789,7 +789,7 @@ static void l2c_csm_config (tL2C_CCB *p_ccb, UINT16 event, void *p_data) p_ccb->fcrb.connect_tick_count = GKI_get_os_tick_count(); #endif /* See if we can forward anything on the hold queue */ - if (p_ccb->xmit_hold_q.count) + if (!GKI_queue_is_empty(&p_ccb->xmit_hold_q)) { l2c_link_check_send_pkts (p_ccb->p_lcb, NULL, NULL); } @@ -872,7 +872,7 @@ static void l2c_csm_config (tL2C_CCB *p_ccb, UINT16 event, void *p_data) #endif /* See if we can forward anything on the hold queue */ - if ( (p_ccb->chnl_state == CST_OPEN) && (p_ccb->xmit_hold_q.count) ) + if ( (p_ccb->chnl_state == CST_OPEN) && (!GKI_queue_is_empty(&p_ccb->xmit_hold_q))) { l2c_link_check_send_pkts (p_ccb->p_lcb, NULL, NULL); } diff --git a/stack/l2cap/l2c_fcr.c b/stack/l2cap/l2c_fcr.c index 6e12607cd..334730d46 100644 --- a/stack/l2cap/l2c_fcr.c +++ b/stack/l2cap/l2c_fcr.c @@ -233,13 +233,13 @@ void l2c_fcr_cleanup (tL2C_CCB *p_ccb) if (p_fcrb->p_rx_sdu) GKI_freebuf (p_fcrb->p_rx_sdu); - while (p_fcrb->waiting_for_ack_q.p_first) + while (!GKI_queue_is_empty(&p_fcrb->waiting_for_ack_q)) GKI_freebuf (GKI_dequeue (&p_fcrb->waiting_for_ack_q)); - while (p_fcrb->srej_rcv_hold_q.p_first) + while (!GKI_queue_is_empty(&p_fcrb->srej_rcv_hold_q)) GKI_freebuf (GKI_dequeue (&p_fcrb->srej_rcv_hold_q)); - while (p_fcrb->retrans_q.p_first) + while (!GKI_queue_is_empty(&p_fcrb->retrans_q)) GKI_freebuf (GKI_dequeue (&p_fcrb->retrans_q)); btu_stop_quick_timer (&p_fcrb->ack_timer); @@ -390,10 +390,10 @@ BOOLEAN l2c_fcr_is_flow_controlled (tL2C_CCB *p_ccb) { /* Check if remote side 
flowed us off or the transmit window is full */ if ( (p_ccb->fcrb.remote_busy == TRUE) - || (p_ccb->fcrb.waiting_for_ack_q.count >= p_ccb->peer_cfg.fcr.tx_win_sz) ) + || (GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q) >= p_ccb->peer_cfg.fcr.tx_win_sz) ) { #if (L2CAP_ERTM_STATS == TRUE) - if (p_ccb->xmit_hold_q.count != 0) + if (!GKI_queue_is_empty(&p_ccb->xmit_hold_q)) { p_ccb->fcrb.xmit_window_closed++; @@ -699,7 +699,7 @@ void l2c_fcr_proc_pdu (tL2C_CCB *p_ccb, BT_HDR *p_buf) L2CAP_TRACE_EVENT (" eRTM Rx Nxt_tx_seq %u, Lst_rx_ack %u, Nxt_seq_exp %u, Lst_ack_snt %u, wt_q.cnt %u, tries %u", p_ccb->fcrb.next_tx_seq, p_ccb->fcrb.last_rx_ack, p_ccb->fcrb.next_seq_expected, - p_ccb->fcrb.last_ack_sent, p_ccb->fcrb.waiting_for_ack_q.count, p_ccb->fcrb.num_tries); + p_ccb->fcrb.last_ack_sent, GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q), p_ccb->fcrb.num_tries); #endif /* BT_TRACE_VERBOSE */ @@ -768,7 +768,7 @@ void l2c_fcr_proc_pdu (tL2C_CCB *p_ccb, BT_HDR *p_buf) if (ctrl_word & L2CAP_FCR_S_FRAME_BIT) ctrl_word &= ~L2CAP_FCR_P_BIT; - if (p_ccb->fcrb.waiting_for_ack_q.count == 0) + if (GKI_queue_is_empty(&p_ccb->fcrb.waiting_for_ack_q)) p_ccb->fcrb.num_tries = 0; l2c_fcr_stop_timer (p_ccb); @@ -797,7 +797,7 @@ void l2c_fcr_proc_pdu (tL2C_CCB *p_ccb, BT_HDR *p_buf) return; /* If we have some buffers held while doing SREJ, and SREJ has cleared, process them now */ - if ( (!p_ccb->fcrb.local_busy) && (!p_ccb->fcrb.srej_sent) && (p_ccb->fcrb.srej_rcv_hold_q.count > 0) ) + if ( (!p_ccb->fcrb.local_busy) && (!p_ccb->fcrb.srej_sent) && (!GKI_queue_is_empty(&p_ccb->fcrb.srej_rcv_hold_q))) { BUFFER_Q temp_q = p_ccb->fcrb.srej_rcv_hold_q; @@ -845,7 +845,7 @@ void l2c_fcr_proc_pdu (tL2C_CCB *p_ccb, BT_HDR *p_buf) } /* If a window has opened, check if we can send any more packets */ - if ( (p_ccb->fcrb.retrans_q.count || p_ccb->xmit_hold_q.count) + if ( (!GKI_queue_is_empty(&p_ccb->fcrb.retrans_q) || !GKI_queue_is_empty(&p_ccb->xmit_hold_q)) && (p_ccb->fcrb.wait_ack == FALSE) 
&& (l2c_fcr_is_flow_controlled (p_ccb) == FALSE) ) { @@ -866,7 +866,7 @@ void l2c_fcr_proc_tout (tL2C_CCB *p_ccb) { L2CAP_TRACE_DEBUG ("l2c_fcr_proc_tout: CID: 0x%04x num_tries: %u (max: %u) wait_ack: %u ack_q_count: %u", p_ccb->local_cid, p_ccb->fcrb.num_tries, p_ccb->peer_cfg.fcr.max_transmit, - p_ccb->fcrb.wait_ack, p_ccb->fcrb.waiting_for_ack_q.count); + p_ccb->fcrb.wait_ack, GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q)); #if (L2CAP_ERTM_STATS == TRUE) p_ccb->fcrb.retrans_touts++; @@ -939,7 +939,7 @@ static BOOLEAN process_reqseq (tL2C_CCB *p_ccb, UINT16 ctrl_word) && ((ctrl_word & L2CAP_FCR_P_BIT) == 0) ) { /* If anything still waiting for ack, restart the timer if it was stopped */ - if (p_fcrb->waiting_for_ack_q.count) + if (!GKI_queue_is_empty(&p_fcrb->waiting_for_ack_q)) l2c_fcr_start_timer (p_ccb); return (TRUE); @@ -951,11 +951,11 @@ static BOOLEAN process_reqseq (tL2C_CCB *p_ccb, UINT16 ctrl_word) num_bufs_acked = (req_seq - p_fcrb->last_rx_ack) & L2CAP_FCR_SEQ_MODULO; /* Verify the request sequence is in range before proceeding */ - if (num_bufs_acked > p_fcrb->waiting_for_ack_q.count) + if (num_bufs_acked > GKI_queue_length(&p_fcrb->waiting_for_ack_q)) { /* The channel is closed if ReqSeq is not in range */ L2CAP_TRACE_WARNING ("L2CAP eRTM Frame BAD Req_Seq - ctrl_word: 0x%04x req_seq 0x%02x last_rx_ack: 0x%02x QCount: %u", - ctrl_word, req_seq, p_fcrb->last_rx_ack, p_fcrb->waiting_for_ack_q.count); + ctrl_word, req_seq, p_fcrb->last_rx_ack, GKI_queue_length(&p_fcrb->waiting_for_ack_q)); l2cu_disconnect_chnl (p_ccb); return (FALSE); @@ -979,7 +979,7 @@ static BOOLEAN process_reqseq (tL2C_CCB *p_ccb, UINT16 ctrl_word) for (xx = 0; xx < num_bufs_acked; xx++) { - ls = ((BT_HDR *)(p_fcrb->waiting_for_ack_q.p_first))->layer_specific & L2CAP_FCR_SAR_BITS; + ls = ((BT_HDR *)(GKI_getfirst(&p_fcrb->waiting_for_ack_q)))->layer_specific & L2CAP_FCR_SAR_BITS; if ( (ls == L2CAP_FCR_UNSEG_SDU) || (ls == L2CAP_FCR_END_SDU) ) full_sdus_xmitted++; @@ -995,7 +995,7 
@@ static BOOLEAN process_reqseq (tL2C_CCB *p_ccb, UINT16 ctrl_word) if ( (p_ccb->p_rcb) && (p_ccb->p_rcb->api.pL2CA_TxComplete_Cb) && (full_sdus_xmitted) ) { /* Special case for eRTM, if all packets sent, send 0xFFFF */ - if ( (p_fcrb->waiting_for_ack_q.count == 0) && (p_ccb->xmit_hold_q.count == 0) ) + if (GKI_queue_is_empty(&p_fcrb->waiting_for_ack_q) && (GKI_queue_is_empty(&p_ccb->xmit_hold_q))) full_sdus_xmitted = 0xFFFF; (*p_ccb->p_rcb->api.pL2CA_TxComplete_Cb)(p_ccb->local_cid, full_sdus_xmitted); @@ -1003,7 +1003,7 @@ static BOOLEAN process_reqseq (tL2C_CCB *p_ccb, UINT16 ctrl_word) } /* If anything still waiting for ack, restart the timer if it was stopped */ - if (p_fcrb->waiting_for_ack_q.count) + if (!GKI_queue_is_empty(&p_fcrb->waiting_for_ack_q)) l2c_fcr_start_timer (p_ccb); return (TRUE); @@ -1165,9 +1165,9 @@ static void process_i_frame (tL2C_CCB *p_ccb, BT_HDR *p_buf, UINT16 ctrl_word, B if (p_fcrb->srej_sent) { /* If SREJ sent, save the frame for later processing as long as it is in sequence */ - next_srej = (((BT_HDR *)p_fcrb->srej_rcv_hold_q.p_last)->layer_specific + 1) & L2CAP_FCR_SEQ_MODULO; + next_srej = (((BT_HDR *)GKI_getlast(&p_fcrb->srej_rcv_hold_q))->layer_specific + 1) & L2CAP_FCR_SEQ_MODULO; - if ( (tx_seq == next_srej) && (p_fcrb->srej_rcv_hold_q.count < p_ccb->our_cfg.fcr.tx_win_sz) ) + if ( (tx_seq == next_srej) && (GKI_queue_length(&p_fcrb->srej_rcv_hold_q) < p_ccb->our_cfg.fcr.tx_win_sz) ) { /* If user gave us a pool for held rx buffers, use that */ if (p_ccb->ertm_info.fcr_rx_pool_id != HCI_ACL_POOL_ID) @@ -1197,7 +1197,7 @@ static void process_i_frame (tL2C_CCB *p_ccb, BT_HDR *p_buf, UINT16 ctrl_word, B else { L2CAP_TRACE_WARNING ("process_i_frame() CID: 0x%04x frame dropped in Srej Sent next_srej:%u hold_q.count:%u win_sz:%u", - p_ccb->local_cid, next_srej, p_fcrb->srej_rcv_hold_q.count, p_ccb->our_cfg.fcr.tx_win_sz); + p_ccb->local_cid, next_srej, GKI_queue_length(&p_fcrb->srej_rcv_hold_q), p_ccb->our_cfg.fcr.tx_win_sz); 
p_fcrb->rej_after_srej = TRUE; GKI_freebuf (p_buf); @@ -1225,10 +1225,10 @@ static void process_i_frame (tL2C_CCB *p_ccb, BT_HDR *p_buf, UINT16 ctrl_word, B } else { - if (p_fcrb->srej_rcv_hold_q.count != 0) + if (!GKI_queue_is_empty(&p_fcrb->srej_rcv_hold_q)) { L2CAP_TRACE_ERROR ("process_i_frame() CID: 0x%04x sending SREJ tx_seq:%d hold_q.count:%u", - p_ccb->local_cid, tx_seq, p_fcrb->srej_rcv_hold_q.count); + p_ccb->local_cid, tx_seq, GKI_queue_length(&p_fcrb->srej_rcv_hold_q)); } p_buf->layer_specific = tx_seq; GKI_enqueue (&p_fcrb->srej_rcv_hold_q, p_buf); @@ -1275,8 +1275,8 @@ static void process_i_frame (tL2C_CCB *p_ccb, BT_HDR *p_buf, UINT16 ctrl_word, B (L2CAP_FCR_ACK_TOUT*QUICK_TIMER_TICKS_PER_SEC)/1000); } } - else if ( ((p_ccb->xmit_hold_q.count == 0) || (l2c_fcr_is_flow_controlled (p_ccb))) - && (p_ccb->fcrb.srej_rcv_hold_q.count == 0) ) + else if ( ((GKI_queue_is_empty(&p_ccb->xmit_hold_q)) || (l2c_fcr_is_flow_controlled (p_ccb))) + && (GKI_queue_is_empty(&p_ccb->fcrb.srej_rcv_hold_q))) { if (p_fcrb->local_busy) l2c_fcr_send_S_frame (p_ccb, L2CAP_FCR_SUP_RNR, 0); @@ -1517,13 +1517,13 @@ static BOOLEAN retransmit_i_frames (tL2C_CCB *p_ccb, UINT8 tx_seq) UINT8 buf_seq; UINT16 ctrl_word; - if ( (p_ccb->fcrb.waiting_for_ack_q.p_first) + if ( (GKI_getfirst(&p_ccb->fcrb.waiting_for_ack_q)) && (p_ccb->peer_cfg.fcr.max_transmit != 0) && (p_ccb->fcrb.num_tries >= p_ccb->peer_cfg.fcr.max_transmit) ) { L2CAP_TRACE_EVENT ("Max Tries Exceeded: (last_acq: %d CID: 0x%04x num_tries: %u (max: %u) ack_q_count: %u", p_ccb->fcrb.last_rx_ack, p_ccb->local_cid, p_ccb->fcrb.num_tries, p_ccb->peer_cfg.fcr.max_transmit, - p_ccb->fcrb.waiting_for_ack_q.count); + GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q)); l2cu_disconnect_chnl (p_ccb); return (FALSE); @@ -1534,7 +1534,7 @@ static BOOLEAN retransmit_i_frames (tL2C_CCB *p_ccb, UINT8 tx_seq) { /* If sending only one, the sequence number tells us which one. Look for it. 
*/ - for (p_buf = (BT_HDR *)p_ccb->fcrb.waiting_for_ack_q.p_first; p_buf; p_buf = (BT_HDR *)GKI_getnext (p_buf)) + for (p_buf = (BT_HDR *)GKI_getfirst(&p_ccb->fcrb.waiting_for_ack_q); p_buf; p_buf = (BT_HDR *)GKI_getnext (p_buf)) { /* Get the old control word */ p = ((UINT8 *) (p_buf+1)) + p_buf->offset + L2CAP_PKT_OVERHEAD; @@ -1551,7 +1551,7 @@ static BOOLEAN retransmit_i_frames (tL2C_CCB *p_ccb, UINT8 tx_seq) if (!p_buf) { - L2CAP_TRACE_ERROR ("retransmit_i_frames() UNKNOWN seq: %u q_count: %u", tx_seq, p_ccb->fcrb.waiting_for_ack_q.count); + L2CAP_TRACE_ERROR ("retransmit_i_frames() UNKNOWN seq: %u q_count: %u", tx_seq, GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q)); return (TRUE); } } @@ -1559,7 +1559,7 @@ static BOOLEAN retransmit_i_frames (tL2C_CCB *p_ccb, UINT8 tx_seq) { /* Retransmitting everything. Flush buffers we already put in the link xmit queue. */ - p_buf = (BT_HDR *)p_ccb->p_lcb->link_xmit_data_q.p_first; + p_buf = (BT_HDR *)GKI_getfirst(&p_ccb->p_lcb->link_xmit_data_q); while (p_buf != NULL) { @@ -1577,10 +1577,10 @@ static BOOLEAN retransmit_i_frames (tL2C_CCB *p_ccb, UINT8 tx_seq) } /* Also flush our retransmission queue */ - while (p_ccb->fcrb.retrans_q.p_first) + while (!GKI_queue_is_empty(&p_ccb->fcrb.retrans_q)) GKI_freebuf (GKI_dequeue (&p_ccb->fcrb.retrans_q)); - p_buf = (BT_HDR *)p_ccb->fcrb.waiting_for_ack_q.p_first; + p_buf = (BT_HDR *)GKI_getfirst(&p_ccb->fcrb.waiting_for_ack_q); } while (p_buf != NULL) @@ -1602,7 +1602,7 @@ static BOOLEAN retransmit_i_frames (tL2C_CCB *p_ccb, UINT8 tx_seq) l2c_link_check_send_pkts (p_ccb->p_lcb, NULL, NULL); - if (p_ccb->fcrb.waiting_for_ack_q.count) + if (GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q)) { p_ccb->fcrb.num_tries++; l2c_fcr_start_timer (p_ccb); @@ -1633,7 +1633,7 @@ BT_HDR *l2c_fcr_get_next_xmit_sdu_seg (tL2C_CCB *p_ccb, UINT16 max_packet_length /* If there is anything in the retransmit queue, that goes first */ - if (p_ccb->fcrb.retrans_q.p_first) + if 
(GKI_getfirst(&p_ccb->fcrb.retrans_q)) { p_buf = (BT_HDR *)GKI_dequeue (&p_ccb->fcrb.retrans_q); @@ -1668,7 +1668,7 @@ BT_HDR *l2c_fcr_get_next_xmit_sdu_seg (tL2C_CCB *p_ccb, UINT16 max_packet_length max_pdu = max_packet_length - L2CAP_MAX_HEADER_FCS; } - p_buf = (BT_HDR *)p_ccb->xmit_hold_q.p_first; + p_buf = (BT_HDR *)GKI_getfirst(&p_ccb->xmit_hold_q); /* If there is more data than the MPS, it requires segmentation */ if (p_buf->len > max_pdu) diff --git a/stack/l2cap/l2c_link.c b/stack/l2cap/l2c_link.c index 240ead3dc..55978a236 100644 --- a/stack/l2cap/l2c_link.c +++ b/stack/l2cap/l2c_link.c @@ -803,7 +803,7 @@ void l2c_link_adjust_allocation (void) /* this link may have sent anything but some other link sent packets so */ /* so we may need a timer to kick off this link's transmissions. */ if ( (p_lcb->link_state == LST_CONNECTED) - && (p_lcb->link_xmit_data_q.count) + && (!GKI_queue_is_empty(&p_lcb->link_xmit_data_q)) && (p_lcb->sent_not_acked < p_lcb->link_xmit_quota) ) btu_start_timer (&p_lcb->timer_entry, BTU_TTYPE_L2CAP_LINK, L2CAP_LINK_FLOW_CONTROL_TOUT); } @@ -1066,11 +1066,11 @@ BOOLEAN l2c_link_check_power_mode (tL2C_LCB *p_lcb) /* * We only switch park to active only if we have unsent packets */ - if ( p_lcb->link_xmit_data_q.count == 0 ) + if ( GKI_queue_is_empty(&p_lcb->link_xmit_data_q)) { for (p_ccb = p_lcb->ccb_queue.p_first_ccb; p_ccb; p_ccb = p_ccb->p_next_ccb) { - if (p_ccb->xmit_hold_q.count != 0) + if (!GKI_queue_is_empty(&p_ccb->xmit_hold_q)) { need_to_active = TRUE; break; @@ -1261,7 +1261,7 @@ void l2c_link_check_send_pkts (tL2C_LCB *p_lcb, tL2C_CCB *p_ccb, BT_HDR *p_buf) /* There is a special case where we have readjusted the link quotas and */ /* this link may have sent anything but some other link sent packets so */ /* so we may need a timer to kick off this link's transmissions. 
*/ - if ( (p_lcb->link_xmit_data_q.count) && (p_lcb->sent_not_acked < p_lcb->link_xmit_quota) ) + if ( (!GKI_queue_is_empty(&p_lcb->link_xmit_data_q)) && (p_lcb->sent_not_acked < p_lcb->link_xmit_quota) ) btu_start_timer (&p_lcb->timer_entry, BTU_TTYPE_L2CAP_LINK, L2CAP_LINK_FLOW_CONTROL_TOUT); } diff --git a/stack/l2cap/l2c_main.c b/stack/l2cap/l2c_main.c index 325f7af8e..1b45713e0 100755 --- a/stack/l2cap/l2c_main.c +++ b/stack/l2cap/l2c_main.c @@ -152,11 +152,11 @@ void l2c_rcv_acl_data (BT_HDR *p_msg) { L2CAP_TRACE_WARNING ("L2CAP - holding ACL for unknown handle:%d ls:%d cid:%d opcode:%d cur count:%d", handle, p_msg->layer_specific, rcv_cid, cmd_code, - l2cb.rcv_hold_q.count); + GKI_queue_length(&l2cb.rcv_hold_q)); p_msg->layer_specific = 2; GKI_enqueue (&l2cb.rcv_hold_q, p_msg); - if (l2cb.rcv_hold_q.count == 1) + if (GKI_queue_length(&l2cb.rcv_hold_q) == 1) btu_start_timer (&l2cb.rcv_hold_tle, BTU_TTYPE_L2CAP_HOLD, BT_1SEC_TIMEOUT); return; @@ -164,7 +164,7 @@ void l2c_rcv_acl_data (BT_HDR *p_msg) else { L2CAP_TRACE_ERROR ("L2CAP - rcvd ACL for unknown handle:%d ls:%d cid:%d opcode:%d cur count:%d", - handle, p_msg->layer_specific, rcv_cid, cmd_code, l2cb.rcv_hold_q.count); + handle, p_msg->layer_specific, rcv_cid, cmd_code, GKI_queue_length(&l2cb.rcv_hold_q)); } GKI_freebuf (p_msg); return; @@ -816,7 +816,7 @@ void l2c_process_held_packets (BOOLEAN timed_out) BT_HDR *p_buf, *p_buf1; BUFFER_Q *p_rcv_hold_q = &l2cb.rcv_hold_q; - if (!p_rcv_hold_q->count) + if (GKI_queue_is_empty(p_rcv_hold_q)) return; if (!timed_out) @@ -842,7 +842,7 @@ void l2c_process_held_packets (BOOLEAN timed_out) } /* If anyone still in the queue, restart the timeout */ - if (p_rcv_hold_q->count) + if (!GKI_queue_is_empty(p_rcv_hold_q)) btu_start_timer (&l2cb.rcv_hold_tle, BTU_TTYPE_L2CAP_HOLD, BT_1SEC_TIMEOUT); } @@ -984,7 +984,7 @@ UINT8 l2c_data_write (UINT16 cid, BT_HDR *p_data, UINT16 flags) if (p_ccb->cong_sent) { L2CAP_TRACE_ERROR ("L2CAP - CID: 0x%04x cannot send, already 
congested xmit_hold_q.count: %u buff_quota: %u", - p_ccb->local_cid, p_ccb->xmit_hold_q.count, p_ccb->buff_quota); + p_ccb->local_cid, GKI_queue_length(&p_ccb->xmit_hold_q), p_ccb->buff_quota); GKI_freebuf (p_data); return (L2CAP_DW_FAILED); diff --git a/stack/l2cap/l2c_utils.c b/stack/l2cap/l2c_utils.c index 77849ac12..2d54ca502 100644 --- a/stack/l2cap/l2c_utils.c +++ b/stack/l2cap/l2c_utils.c @@ -210,7 +210,7 @@ void l2cu_release_lcb (tL2C_LCB *p_lcb) btm_acl_removed (p_lcb->remote_bd_addr, BT_TRANSPORT_BR_EDR); #endif /* Release any held buffers */ - while (p_lcb->link_xmit_data_q.p_first) + while (!GKI_queue_is_empty(&p_lcb->link_xmit_data_q)) GKI_freebuf (GKI_dequeue (&p_lcb->link_xmit_data_q)); #if (L2CAP_UCD_INCLUDED == TRUE) @@ -934,7 +934,7 @@ void l2cu_send_peer_disc_req (tL2C_CCB *p_ccb) */ if (p_ccb->peer_cfg.fcr.mode == L2CAP_FCR_BASIC_MODE) { - while (p_ccb->xmit_hold_q.p_first) + while (GKI_getfirst(&p_ccb->xmit_hold_q)) { p_buf2 = (BT_HDR *)GKI_dequeue (&p_ccb->xmit_hold_q); l2cu_set_acl_hci_header (p_buf2, p_ccb); @@ -1685,7 +1685,7 @@ void l2cu_release_ccb (tL2C_CCB *p_ccb) /* Stop the timer */ btu_stop_timer (&p_ccb->timer_entry); - while (p_ccb->xmit_hold_q.p_first) + while (!GKI_queue_is_empty(&p_ccb->xmit_hold_q)) GKI_freebuf (GKI_dequeue (&p_ccb->xmit_hold_q)); l2c_fcr_cleanup (p_ccb); @@ -3108,7 +3108,7 @@ static tL2C_CCB *l2cu_get_next_channel_in_rr(tL2C_LCB *p_lcb) } L2CAP_TRACE_DEBUG("RR scan pri=%d, lcid=0x%04x, q_cout=%d", - p_ccb->ccb_priority, p_ccb->local_cid, p_ccb->xmit_hold_q.count ); + p_ccb->ccb_priority, p_ccb->local_cid, GKI_queue_length(&p_ccb->xmit_hold_q)); /* store the next serving channel */ /* this channel is the last channel of its priority group */ @@ -3133,9 +3133,9 @@ static tL2C_CCB *l2cu_get_next_channel_in_rr(tL2C_LCB *p_lcb) if (p_ccb->fcrb.wait_ack || p_ccb->fcrb.remote_busy) continue; - if ( p_ccb->fcrb.retrans_q.count == 0 ) + if ( GKI_queue_is_empty(&p_ccb->fcrb.retrans_q)) { - if ( p_ccb->xmit_hold_q.count 
== 0 ) + if ( GKI_queue_is_empty(&p_ccb->xmit_hold_q)) continue; /* If using the common pool, should be at least 10% free. */ @@ -3149,7 +3149,7 @@ static tL2C_CCB *l2cu_get_next_channel_in_rr(tL2C_LCB *p_lcb) } else { - if (p_ccb->xmit_hold_q.count == 0) + if (GKI_queue_is_empty(&p_ccb->xmit_hold_q)) continue; } @@ -3259,9 +3259,9 @@ BT_HDR *l2cu_get_next_buffer_to_send (tL2C_LCB *p_lcb) continue; /* No more checks needed if sending from the reatransmit queue */ - if (p_ccb->fcrb.retrans_q.count == 0) + if (GKI_queue_is_empty(&p_ccb->fcrb.retrans_q)) { - if (p_ccb->xmit_hold_q.count == 0) + if (GKI_queue_is_empty(&p_ccb->xmit_hold_q)) continue; /* If using the common pool, should be at least 10% free. */ @@ -3282,7 +3282,7 @@ BT_HDR *l2cu_get_next_buffer_to_send (tL2C_LCB *p_lcb) } else { - if (p_ccb->xmit_hold_q.count != 0) + if (!GKI_queue_is_empty(&p_ccb->xmit_hold_q)) { p_buf = (BT_HDR *)GKI_dequeue (&p_ccb->xmit_hold_q); if(NULL == p_buf) @@ -3408,7 +3408,7 @@ void l2cu_set_acl_hci_header (BT_HDR *p_buf, tL2C_CCB *p_ccb) *******************************************************************************/ void l2cu_check_channel_congestion (tL2C_CCB *p_ccb) { - UINT16 q_count = p_ccb->xmit_hold_q.count; + UINT16 q_count = GKI_queue_length(&p_ccb->xmit_hold_q); #if (L2CAP_UCD_INCLUDED == TRUE) if ( p_ccb->local_cid == L2CAP_CONNECTIONLESS_CID ) diff --git a/stack/rfcomm/port_api.c b/stack/rfcomm/port_api.c index 364308838..83c94da02 100644 --- a/stack/rfcomm/port_api.c +++ b/stack/rfcomm/port_api.c @@ -1132,7 +1132,7 @@ int PORT_Purge (UINT16 handle, UINT8 purge_flags) { PORT_SCHEDULE_LOCK; /* to prevent missing credit */ - count = p_port->rx.queue.count; + count = GKI_queue_length(&p_port->rx.queue); while ((p_buf = (BT_HDR *)GKI_dequeue (&p_port->rx.queue)) != NULL) GKI_freebuf (p_buf); @@ -1368,7 +1368,7 @@ static int port_write (tPORT *p_port, BT_HDR *p_buf) (PORT_CTRL_REQ_SENT | PORT_CTRL_IND_RECEIVED))) { if ((p_port->tx.queue_size > PORT_TX_CRITICAL_WM) - || 
(p_port->tx.queue.count > PORT_TX_BUF_CRITICAL_WM)) + || (GKI_queue_length(&p_port->tx.queue) > PORT_TX_BUF_CRITICAL_WM)) { RFCOMM_TRACE_WARNING ("PORT_Write: Queue size: %d", p_port->tx.queue_size); @@ -1526,7 +1526,7 @@ int PORT_WriteDataCO (UINT16 handle, int* p_len) /* data fits into the end of the queue */ PORT_SCHEDULE_LOCK; - if (((p_buf = (BT_HDR *)p_port->tx.queue.p_last) != NULL) + if (((p_buf = (BT_HDR *)GKI_getlast(&p_port->tx.queue)) != NULL) && (((int)p_buf->len + available) <= (int)p_port->peer_mtu) && (((int)p_buf->len + available) <= (int)length)) { @@ -1560,12 +1560,12 @@ int PORT_WriteDataCO (UINT16 handle, int* p_len) { /* if we're over buffer high water mark, we're done */ if ((p_port->tx.queue_size > PORT_TX_HIGH_WM) - || (p_port->tx.queue.count > PORT_TX_BUF_HIGH_WM)) + || (GKI_queue_length(&p_port->tx.queue) > PORT_TX_BUF_HIGH_WM)) { port_flow_control_user(p_port); event |= PORT_EV_FC; debug("tx queue is full,tx.queue_size:%d,tx.queue.count:%d,available:%d", - p_port->tx.queue_size, p_port->tx.queue.count, available); + p_port->tx.queue_size, GKI_queue_length(&p_port->tx.queue), available); break; } @@ -1677,7 +1677,7 @@ int PORT_WriteData (UINT16 handle, char *p_data, UINT16 max_len, UINT16 *p_len) /* data fits into the end of the queue */ PORT_SCHEDULE_LOCK; - if (((p_buf = (BT_HDR *)p_port->tx.queue.p_last) != NULL) + if (((p_buf = (BT_HDR *)GKI_getlast(&p_port->tx.queue)) != NULL) && ((p_buf->len + max_len) <= p_port->peer_mtu) && ((p_buf->len + max_len) <= length)) { @@ -1698,7 +1698,7 @@ int PORT_WriteData (UINT16 handle, char *p_data, UINT16 max_len, UINT16 *p_len) { /* if we're over buffer high water mark, we're done */ if ((p_port->tx.queue_size > PORT_TX_HIGH_WM) - || (p_port->tx.queue.count > PORT_TX_BUF_HIGH_WM)) + || (GKI_queue_length(&p_port->tx.queue) > PORT_TX_BUF_HIGH_WM)) break; /* continue with rfcomm data write */ diff --git a/stack/rfcomm/port_rfc.c b/stack/rfcomm/port_rfc.c index 5704c017c..3fa301380 100644 --- 
a/stack/rfcomm/port_rfc.c +++ b/stack/rfcomm/port_rfc.c @@ -865,7 +865,7 @@ void PORT_DataInd (tRFC_MCB *p_mcb, UINT8 dlci, BT_HDR *p_buf) /* Check if rx queue exceeds the limit */ if ((p_port->rx.queue_size + p_buf->len > PORT_RX_CRITICAL_WM) - || (p_port->rx.queue.count + 1 > p_port->rx_buf_critical)) + || (GKI_queue_length(&p_port->rx.queue) + 1 > p_port->rx_buf_critical)) { RFCOMM_TRACE_EVENT ("PORT_DataInd. Buffer over run. Dropping the buffer"); GKI_freebuf (p_buf); diff --git a/stack/rfcomm/port_utils.c b/stack/rfcomm/port_utils.c index 642ddb88f..7d29336dd 100644 --- a/stack/rfcomm/port_utils.c +++ b/stack/rfcomm/port_utils.c @@ -420,7 +420,7 @@ UINT32 port_flow_control_user (tPORT *p_port) || !p_port->rfc.p_mcb || !p_port->rfc.p_mcb->peer_ready || (p_port->tx.queue_size > PORT_TX_HIGH_WM) - || (p_port->tx.queue.count > PORT_TX_BUF_HIGH_WM); + || (GKI_queue_length(&p_port->tx.queue) > PORT_TX_BUF_HIGH_WM); if (p_port->tx.user_fc == fc) return (0); @@ -536,7 +536,7 @@ void port_flow_control_peer(tPORT *p_port, BOOLEAN enable, UINT16 count) p_port->rx.peer_fc = TRUE; } /* if queue count reached credit rx max, set peer fc */ - else if (p_port->rx.queue.count >= p_port->credit_rx_max) + else if (GKI_queue_length(&p_port->rx.queue) >= p_port->credit_rx_max) { p_port->rx.peer_fc = TRUE; } @@ -552,7 +552,7 @@ void port_flow_control_peer(tPORT *p_port, BOOLEAN enable, UINT16 count) /* check if it can be resumed now */ if (p_port->rx.peer_fc && (p_port->rx.queue_size < PORT_RX_LOW_WM) - && (p_port->rx.queue.count < PORT_RX_BUF_LOW_WM)) + && (GKI_queue_length(&p_port->rx.queue) < PORT_RX_BUF_LOW_WM)) { p_port->rx.peer_fc = FALSE; @@ -573,7 +573,7 @@ void port_flow_control_peer(tPORT *p_port, BOOLEAN enable, UINT16 count) /* Check the size of the rx queue. 
If it exceeds certain */ /* level and flow control has not been sent to the peer do it now */ else if ( ((p_port->rx.queue_size > PORT_RX_HIGH_WM) - || (p_port->rx.queue.count > PORT_RX_BUF_HIGH_WM)) + || (GKI_queue_length(&p_port->rx.queue) > PORT_RX_BUF_HIGH_WM)) && !p_port->rx.peer_fc) { RFCOMM_TRACE_EVENT ("PORT_DataInd Data reached HW. Sending FC set."); diff --git a/stack/rfcomm/rfc_port_fsm.c b/stack/rfcomm/rfc_port_fsm.c index a998b6ec9..c0d7fbc35 100644 --- a/stack/rfcomm/rfc_port_fsm.c +++ b/stack/rfcomm/rfc_port_fsm.c @@ -431,7 +431,7 @@ void rfc_port_sm_opened (tPORT *p_port, UINT16 event, void *p_data) case RFC_EVENT_DISC: p_port->rfc.state = RFC_STATE_CLOSED; rfc_send_ua (p_port->rfc.p_mcb, p_port->dlci); - if(p_port->rx.queue.count) + if(!GKI_queue_is_empty(&p_port->rx.queue)) { /* give a chance to upper stack to close port properly */ RFCOMM_TRACE_DEBUG("port queue is not empty"); |