author     Chris Manton <cmanton@google.com>       2014-05-06 10:35:42 -0700
committer  Andre Eisenbach <eisenbach@google.com>  2015-03-16 16:51:29 -0700
commit     fe7216ca12f91baae733e7c93063db73121af308
tree       03a66f188e1fd2e8daa48d5be04220b80422d7bf
parent     284440f0c9f9fe15b162e37ef2bf6af439407447
Enforce GKI API buffer usage
Also add a new API, GKI_queue_length(BUFFER_Q *).
36 files changed, 198 insertions, 187 deletions
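
The change enforces a simple data-hiding pattern in C: the BUFFER_Q members are renamed with a leading underscore so that any direct field access remaining outside the GKI itself stands out and fails to compile, and every call site goes through an accessor function instead. A minimal sketch of the accessor surface, with field and function names taken from gki/common/gki.h and gki/common/gki_buffer.c but the surrounding typedefs abbreviated here for illustration:

    typedef unsigned short UINT16;
    typedef unsigned char  BOOLEAN;

    /* Queue header; callers must treat these fields as private. */
    typedef struct
    {
        void    *_p_first;   /* head of the singly linked buffer list */
        void    *_p_last;    /* tail pointer, kept for O(1) enqueue */
        UINT16   _count;     /* number of buffers currently queued */
    } BUFFER_Q;

    /* Accessors that replace direct reads of the count field. */
    UINT16 GKI_queue_length(BUFFER_Q *p_q)
    {
        return p_q->_count;
    }

    BOOLEAN GKI_queue_is_empty(BUFFER_Q *p_q)
    {
        return (BOOLEAN)(p_q->_count == 0);
    }
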
diff --git a/bta/av/bta_av_aact.c b/bta/av/bta_av_aact.c
index e8e5edc70..22128a539 100644
--- a/bta/av/bta_av_aact.c
+++ b/bta/av/bta_av_aact.c
@@ -2262,7 +2262,7 @@ void bta_av_data_path (tBTA_AV_SCB *p_scb, tBTA_AV_DATA *p_data)
         else
         {
             /* just dequeue it from the q_info.a2d */
-            if(p_scb->q_info.a2d.count < 3)
+            if(GKI_queue_length(&p_scb->q_info.a2d) < 3)
             {
                 /* put it back to the queue */
                 GKI_enqueue_head (&p_scb->q_info.a2d, p_buf);
diff --git a/bta/av/bta_av_main.c b/bta/av/bta_av_main.c
index a8544c4a2..fe5f4ebd1 100644
--- a/bta/av/bta_av_main.c
+++ b/bta/av/bta_av_main.c
@@ -1206,7 +1206,7 @@ void bta_av_dup_audio_buf(tBTA_AV_SCB *p_scb, BT_HDR *p_buf)
                 memcpy(p_new, p_buf, copy_size);
                 pq = &p_scbi->q_info.a2d;
                 GKI_enqueue(pq, p_new);
-                if(pq->count > p_bta_av_cfg->audio_mqs)
+                if(GKI_queue_length(pq) > p_bta_av_cfg->audio_mqs)
                 {
                     bta_av_co_audio_drop(p_scbi->hndl);
                     GKI_freebuf(GKI_dequeue(pq));
diff --git a/bta/gatt/bta_gattc_act.c b/bta/gatt/bta_gattc_act.c
index 546a56f26..c92abaed2 100755
--- a/bta/gatt/bta_gattc_act.c
+++ b/bta/gatt/bta_gattc_act.c
@@ -1025,7 +1025,7 @@ void bta_gattc_disc_cmpl(tBTA_GATTC_CLCB *p_clcb, tBTA_GATTC_DATA *p_data)
     /* clean up cache */
     if(p_clcb->p_srcb && p_clcb->p_srcb->p_srvc_cache)
     {
-        while (p_clcb->p_srcb->cache_buffer.p_first)
+        while (!GKI_queue_is_empty(&p_clcb->p_srcb->cache_buffer))
         {
             GKI_freebuf (GKI_dequeue (&p_clcb->p_srcb->cache_buffer));
         }
@@ -1898,7 +1898,7 @@ void bta_gattc_process_api_refresh(tBTA_GATTC_CB *p_cb, tBTA_GATTC_DATA * p_msg)
         /* in all other cases, mark it and delete the cache */
         if (p_srvc_cb->p_srvc_cache != NULL)
         {
-            while (p_srvc_cb->cache_buffer.p_first)
+            while (!GKI_queue_is_empty(&p_srvc_cb->cache_buffer))
                 GKI_freebuf (GKI_dequeue (&p_srvc_cb->cache_buffer));
 
             p_srvc_cb->p_srvc_cache = NULL;
diff --git a/bta/gatt/bta_gattc_cache.c b/bta/gatt/bta_gattc_cache.c
index db9ebb60c..faf1596c9 100644
--- a/bta/gatt/bta_gattc_cache.c
+++ b/bta/gatt/bta_gattc_cache.c
@@ -171,7 +171,7 @@ tBTA_GATT_STATUS bta_gattc_init_cache(tBTA_GATTC_SERV *p_srvc_cb)
 {
     tBTA_GATT_STATUS status = BTA_GATT_OK;
 
-    while (p_srvc_cb->cache_buffer.p_first)
+    while (!GKI_queue_is_empty(&p_srvc_cb->cache_buffer))
         GKI_freebuf (GKI_dequeue (&p_srvc_cb->cache_buffer));
 
     utl_freebuf((void **)&p_srvc_cb->p_srvc_list);
@@ -1495,7 +1495,7 @@ void bta_gattc_rebuild_cache(tBTA_GATTC_SERV *p_srvc_cb, UINT16 num_attr,
     APPL_TRACE_ERROR("bta_gattc_rebuild_cache");
     if (attr_index == 0)
     {
-        while (p_srvc_cb->cache_buffer.p_first)
+        while (!GKI_queue_is_empty(&p_srvc_cb->cache_buffer))
             GKI_freebuf (GKI_dequeue (&p_srvc_cb->cache_buffer));
 
         if (bta_gattc_alloc_cache_buf(p_srvc_cb) == NULL)
diff --git a/bta/gatt/bta_gattc_utils.c b/bta/gatt/bta_gattc_utils.c
index a7a955727..69be9b0f3 100644
--- a/bta/gatt/bta_gattc_utils.c
+++ b/bta/gatt/bta_gattc_utils.c
@@ -410,7 +410,7 @@ tBTA_GATTC_SERV * bta_gattc_srcb_alloc(BD_ADDR bda)
 
     if (p_tcb != NULL)
     {
-        while (p_tcb->cache_buffer.p_first)
+        while (!GKI_queue_is_empty(&p_tcb->cache_buffer))
             GKI_freebuf (GKI_dequeue (&p_tcb->cache_buffer));
 
         utl_freebuf((void **)&p_tcb->p_srvc_list);
diff --git a/bta/pan/bta_pan_act.c b/bta/pan/bta_pan_act.c
index ca3567c80..be4dd997a 100644
--- a/bta/pan/bta_pan_act.c
+++ b/bta/pan/bta_pan_act.c
@@ -665,7 +665,7 @@ void bta_pan_tx_path(tBTA_PAN_SCB *p_scb, tBTA_PAN_DATA *p_data)
             bta_pan_co_tx_path(p_scb->handle, p_scb->app_id);
 
             /* free data that exceeds queue level */
-            while(p_scb->data_queue.count > bta_pan_cb.q_level)
+            while(GKI_queue_length(&p_scb->data_queue) > bta_pan_cb.q_level)
                 GKI_freebuf(GKI_dequeue(&p_scb->data_queue));
             bta_pan_pm_conn_idle(p_scb);
         }
@@ -690,12 +690,12 @@ void bta_pan_tx_path(tBTA_PAN_SCB *p_scb, tBTA_PAN_DATA *p_data)
             }
 
             /* free data that exceeds queue level */
-            while(p_scb->data_queue.count > bta_pan_cb.q_level)
+            while(GKI_queue_length(&p_scb->data_queue) > bta_pan_cb.q_level)
                 GKI_freebuf(GKI_dequeue(&p_scb->data_queue));
 
             /* if there is more data to be passed to upper layer */
-            if(p_scb->data_queue.count)
+            if(!GKI_queue_is_empty(&p_scb->data_queue))
             {
                 if ((p_buf = (BT_HDR *) GKI_getbuf(sizeof(BT_HDR))) != NULL)
                 {
diff --git a/btif/src/btif_media_task.c b/btif/src/btif_media_task.c
index de34818b9..b85589c24 100644
--- a/btif/src/btif_media_task.c
+++ b/btif/src/btif_media_task.c
@@ -373,7 +373,7 @@ static void log_tstamps_us(char *comment)
     static UINT64 prev_us = 0;
     const UINT64 now_us = time_now_us();
     APPL_TRACE_DEBUG("[%s] ts %08llu, diff : %08llu, queue sz %d", comment, now_us, now_us - prev_us,
-                btif_media_cb.TxAaQ.count);
+                GKI_queue_length(&btif_media_cb.TxAaQ));
     prev_us = now_us;
 }
 
@@ -2498,8 +2498,8 @@ UINT8 btif_media_sink_enque_buf(BT_HDR *p_pkt)
     tBT_SBC_HDR *p_msg;
 
     if(btif_media_cb.rx_flush == TRUE) /* Flush enabled, do not enque*/
-        return btif_media_cb.RxSbcQ.count;
-    if(btif_media_cb.RxSbcQ.count == MAX_OUTPUT_A2DP_FRAME_QUEUE_SZ)
+        return GKI_queue_length(&btif_media_cb.RxSbcQ);
+    if(GKI_queue_length(&btif_media_cb.RxSbcQ) == MAX_OUTPUT_A2DP_FRAME_QUEUE_SZ)
     {
         GKI_freebuf(GKI_dequeue(&(btif_media_cb.RxSbcQ)));
     }
@@ -2513,7 +2513,7 @@ UINT8 btif_media_sink_enque_buf(BT_HDR *p_pkt)
         p_msg->num_frames_to_be_processed = (*((UINT8*)(p_msg + 1) + p_msg->offset)) & 0x0f;
         BTIF_TRACE_VERBOSE("btif_media_sink_enque_buf + ", p_msg->num_frames_to_be_processed);
         GKI_enqueue(&(btif_media_cb.RxSbcQ), p_msg);
-        if(btif_media_cb.RxSbcQ.count == MAX_A2DP_DELAYED_START_FRAME_COUNT)
+        if(GKI_queue_length(&btif_media_cb.RxSbcQ) == MAX_A2DP_DELAYED_START_FRAME_COUNT)
         {
             BTIF_TRACE_DEBUG(" Initiate Decoding ");
             btif_media_task_start_decoding_req();
@@ -2524,7 +2524,7 @@ UINT8 btif_media_sink_enque_buf(BT_HDR *p_pkt)
         /* let caller deal with a failed allocation */
         BTIF_TRACE_VERBOSE("btif_media_sink_enque_buf No Buffer left - ");
     }
-    return btif_media_cb.RxSbcQ.count;
+    return GKI_queue_length(&btif_media_cb.RxSbcQ);
 }
 
@@ -2744,7 +2744,7 @@ static void btif_media_aa_prep_sbc_2_send(UINT8 nb_frame)
         if (NULL == (p_buf = GKI_getpoolbuf(BTIF_MEDIA_AA_POOL_ID)))
         {
             APPL_TRACE_ERROR ("ERROR btif_media_aa_prep_sbc_2_send no buffer TxCnt %d ",
-                               btif_media_cb.TxAaQ.count);
+                               GKI_queue_length(&btif_media_cb.TxAaQ));
             return;
         }
@@ -2810,7 +2810,7 @@ static void btif_media_aa_prep_sbc_2_send(UINT8 nb_frame)
         {
             APPL_TRACE_DEBUG("### tx suspended, discarded frame ###");
 
-            if (btif_media_cb.TxAaQ.count > 0)
+            if (GKI_queue_length(&btif_media_cb.TxAaQ) > 0)
                 btif_media_flush_q(&(btif_media_cb.TxAaQ));
 
             GKI_freebuf(p_buf);
@@ -2842,14 +2842,14 @@ static void btif_media_aa_prep_2_send(UINT8 nb_frame)
 {
     VERBOSE("%s() - frames=%d (queue=%d)", __FUNCTION__, nb_frame,
                       btif_media_cb.TxAaQ.count);
 
-    while (btif_media_cb.TxAaQ.count >= (MAX_OUTPUT_A2DP_FRAME_QUEUE_SZ-nb_frame))
+    while (GKI_queue_length(&btif_media_cb.TxAaQ) >= (MAX_OUTPUT_A2DP_FRAME_QUEUE_SZ-nb_frame))
     {
         APPL_TRACE_WARNING("%s() - TX queue buffer count %d",
-            __FUNCTION__, btif_media_cb.TxAaQ.count);
+            __FUNCTION__, GKI_queue_length(&btif_media_cb.TxAaQ));
         GKI_freebuf(GKI_dequeue(&(btif_media_cb.TxAaQ)));
     }
 
-    if (btif_media_cb.TxAaQ.count) --nb_frame;
+    if (GKI_queue_length(&btif_media_cb.TxAaQ)) --nb_frame;
 
     switch (btif_media_cb.TxTranscoding)
     {
diff --git a/gki/common/gki.h b/gki/common/gki.h
index 8bbcd108a..0e6f892b8 100644
--- a/gki/common/gki.h
+++ b/gki/common/gki.h
@@ -111,9 +111,9 @@ typedef struct
 */
 typedef struct
 {
-    void    *p_first;
-    void    *p_last;
-    UINT16   count;
+    void    *_p_first;
+    void    *_p_last;
+    UINT16   _count;
 } BUFFER_Q;
 
 #define GKI_IS_QUEUE_EMPTY(p_q) ((p_q)->count == 0)
@@ -176,6 +176,7 @@ GKI_API extern void    *GKI_getfirst (BUFFER_Q *);
 GKI_API extern void    *GKI_getlast (BUFFER_Q *);
 GKI_API extern void    *GKI_getnext (void *);
 GKI_API extern void     GKI_init_q (BUFFER_Q *);
+GKI_API extern UINT16   GKI_queue_length(BUFFER_Q *);
 GKI_API extern BOOLEAN  GKI_queue_is_empty(BUFFER_Q *);
 GKI_API extern void    *GKI_remove_from_queue (BUFFER_Q *, void *);
 GKI_API extern UINT16   GKI_get_pool_bufsize (UINT8);
diff --git a/gki/common/gki_buffer.c b/gki/common/gki_buffer.c
index 0ce98a16c..fce597e8d 100644
--- a/gki/common/gki_buffer.c
+++ b/gki/common/gki_buffer.c
@@ -70,7 +70,7 @@ static void gki_init_free_queue (UINT8 id, UINT16 size, UINT16 total, void *p_me
     if(p_mem)
     {
         hdr = (BUFFER_HDR_T *)p_mem;
-        p_cb->freeq[id].p_first = hdr;
+        p_cb->freeq[id]._p_first = hdr;
         for (i = 0; i < total; i++)
         {
             hdr->task_id = GKI_INVALID_TASK;
@@ -83,7 +83,7 @@ static void gki_init_free_queue (UINT8 id, UINT16 size, UINT16 total, void *p_me
             hdr1->p_next = hdr;
         }
         hdr1->p_next = NULL;
-        p_cb->freeq[id].p_last = hdr1;
+        p_cb->freeq[id]._p_last = hdr1;
     }
 // btla-specific --
     return;
@@ -99,7 +99,7 @@ static BOOLEAN gki_alloc_free_queue(UINT8 id)
 
     Q = &p_cb->freeq[p_cb->pool_list[id]];
 
-    if(Q->p_first == 0)
+    if(Q->_p_first == 0)
     {
         void* p_mem = GKI_os_malloc((Q->size + BUFFER_PADDING_SIZE) * Q->total);
         if(p_mem)
@@ -129,8 +129,8 @@ void gki_dealloc_free_queue(void)
         p_cb->freeq[i].cur_cnt = 0;
         p_cb->freeq[i].max_cnt = 0;
-        p_cb->freeq[i].p_first = NULL;
-        p_cb->freeq[i].p_last = NULL;
+        p_cb->freeq[i]._p_first = NULL;
+        p_cb->freeq[i]._p_last = NULL;
 
         p_cb->pool_start[i] = NULL;
         p_cb->pool_end[i] = NULL;
@@ -173,8 +173,8 @@ void gki_buffer_init(void)
         p_cb->pool_end[tt] = NULL;
         p_cb->pool_size[tt] = 0;
 
-        p_cb->freeq[tt].p_first = 0;
-        p_cb->freeq[tt].p_last = 0;
+        p_cb->freeq[tt]._p_first = 0;
+        p_cb->freeq[tt]._p_last = 0;
         p_cb->freeq[tt].size = 0;
         p_cb->freeq[tt].total = 0;
         p_cb->freeq[tt].cur_cnt = 0;
@@ -342,8 +342,8 @@ void gki_buffer_init(void)
 *******************************************************************************/
 void GKI_init_q (BUFFER_Q *p_q)
 {
-    p_q->p_first = p_q->p_last = NULL;
-    p_q->count = 0;
+    p_q->_p_first = p_q->_p_last = NULL;
+    p_q->_count = 0;
 
     return;
 }
@@ -410,18 +410,17 @@ void *GKI_getbuf (UINT16 size)
     {
 // btla-specific ++
 #ifdef GKI_USE_DEFERED_ALLOC_BUF_POOLS
-        if(Q->p_first == 0 && gki_alloc_free_queue(i) != TRUE)
-        {
+        if(Q->_p_first == 0 && gki_alloc_free_queue(i) != TRUE) {
             GKI_enable();
             return NULL;
         }
 #endif
 // btla-specific --
 
-        p_hdr = Q->p_first;
-        Q->p_first = p_hdr->p_next;
+        p_hdr = Q->_p_first;
+        Q->_p_first = p_hdr->p_next;
 
-        if (!Q->p_first)
-            Q->p_last = NULL;
+        if (!Q->_p_first)
+            Q->_p_last = NULL;
 
         if(++Q->cur_cnt > Q->max_cnt)
             Q->max_cnt = Q->cur_cnt;
@@ -480,18 +479,17 @@ void *GKI_getpoolbuf (UINT8 pool_id)
     {
 // btla-specific ++
 #ifdef GKI_USE_DEFERED_ALLOC_BUF_POOLS
-        if(Q->p_first == 0 && gki_alloc_free_queue(pool_id) != TRUE)
-        {
+        if(Q->_p_first == 0 && gki_alloc_free_queue(pool_id) != TRUE) {
             GKI_enable();
             return NULL;
         }
 #endif
 // btla-specific --
 
-        p_hdr = Q->p_first;
-        Q->p_first = p_hdr->p_next;
+        p_hdr = Q->_p_first;
+        Q->_p_first = p_hdr->p_next;
 
-        if (!Q->p_first)
-            Q->p_last = NULL;
+        if (!Q->_p_first)
+            Q->_p_last = NULL;
 
         if(++Q->cur_cnt > Q->max_cnt)
             Q->max_cnt = Q->cur_cnt;
@@ -560,12 +558,12 @@ void GKI_freebuf (void *p_buf)
     ** Release the buffer
     */
     Q  = &gki_cb.com.freeq[p_hdr->q_id];
-    if (Q->p_last)
-        Q->p_last->p_next = p_hdr;
+    if (Q->_p_last)
+        Q->_p_last->p_next = p_hdr;
     else
-        Q->p_first = p_hdr;
+        Q->_p_first = p_hdr;
 
-    Q->p_last = p_hdr;
+    Q->_p_last = p_hdr;
     p_hdr->p_next = NULL;
     p_hdr->status = BUF_STATUS_FREE;
     p_hdr->task_id = GKI_INVALID_TASK;
@@ -772,16 +770,16 @@ void GKI_enqueue (BUFFER_Q *p_q, void *p_buf)
     GKI_disable();
 
     /* Since the queue is exposed (C vs C++), keep the pointers in exposed format */
-    if (p_q->p_last)
+    if (p_q->_p_last)
     {
-        BUFFER_HDR_T *p_last_hdr = (BUFFER_HDR_T *)((UINT8 *)p_q->p_last - BUFFER_HDR_SIZE);
-        p_last_hdr->p_next = p_hdr;
+        BUFFER_HDR_T *_p_last_hdr = (BUFFER_HDR_T *)((UINT8 *)p_q->_p_last - BUFFER_HDR_SIZE);
+        _p_last_hdr->p_next = p_hdr;
     }
     else
-        p_q->p_first = p_buf;
+        p_q->_p_first = p_buf;
 
-    p_q->p_last = p_buf;
-    p_q->count++;
+    p_q->_p_last = p_buf;
+    p_q->_count++;
 
     p_hdr->p_next = NULL;
     p_hdr->status = BUF_STATUS_QUEUED;
@@ -826,18 +824,18 @@ void GKI_enqueue_head (BUFFER_Q *p_q, void *p_buf)
     GKI_disable();
 
-    if (p_q->p_first)
+    if (p_q->_p_first)
     {
-        p_hdr->p_next = (BUFFER_HDR_T *)((UINT8 *)p_q->p_first - BUFFER_HDR_SIZE);
-        p_q->p_first = p_buf;
+        p_hdr->p_next = (BUFFER_HDR_T *)((UINT8 *)p_q->_p_first - BUFFER_HDR_SIZE);
+        p_q->_p_first = p_buf;
     }
     else
     {
-        p_q->p_first = p_buf;
-        p_q->p_last  = p_buf;
+        p_q->_p_first = p_buf;
+        p_q->_p_last  = p_buf;
         p_hdr->p_next = NULL;
     }
-    p_q->count++;
+    p_q->_count++;
 
     p_hdr->status = BUF_STATUS_QUEUED;
 
@@ -864,25 +862,25 @@ void *GKI_dequeue (BUFFER_Q *p_q)
     GKI_disable();
 
-    if (!p_q || !p_q->count)
+    if (!p_q || !p_q->_count)
     {
         GKI_enable();
         return (NULL);
     }
 
-    p_hdr = (BUFFER_HDR_T *)((UINT8 *)p_q->p_first - BUFFER_HDR_SIZE);
+    p_hdr = (BUFFER_HDR_T *)((UINT8 *)p_q->_p_first - BUFFER_HDR_SIZE);
 
     /* Keep buffers such that GKI header is invisible */
     if (p_hdr->p_next)
-        p_q->p_first = ((UINT8 *)p_hdr->p_next + BUFFER_HDR_SIZE);
+        p_q->_p_first = ((UINT8 *)p_hdr->p_next + BUFFER_HDR_SIZE);
     else
     {
-        p_q->p_first = NULL;
-        p_q->p_last  = NULL;
+        p_q->_p_first = NULL;
+        p_q->_p_last  = NULL;
     }
 
-    p_q->count--;
+    p_q->_count--;
 
     p_hdr->p_next = NULL;
     p_hdr->status = BUF_STATUS_UNLINKED;
@@ -912,14 +910,14 @@ void *GKI_remove_from_queue (BUFFER_Q *p_q, void *p_buf)
     GKI_disable();
 
-    if (p_buf == p_q->p_first)
+    if (p_buf == p_q->_p_first)
     {
         GKI_enable();
         return (GKI_dequeue (p_q));
     }
 
     p_buf_hdr = (BUFFER_HDR_T *)((UINT8 *)p_buf - BUFFER_HDR_SIZE);
-    p_prev    = (BUFFER_HDR_T *)((UINT8 *)p_q->p_first - BUFFER_HDR_SIZE);
+    p_prev    = (BUFFER_HDR_T *)((UINT8 *)p_q->_p_first - BUFFER_HDR_SIZE);
 
     for ( ; p_prev; p_prev = p_prev->p_next)
     {
@@ -928,12 +926,12 @@ void *GKI_remove_from_queue (BUFFER_Q *p_q, void *p_buf)
         {
             p_prev->p_next = p_buf_hdr->p_next;
 
-            /* If we are removing the last guy in the queue, update p_last */
-            if (p_buf == p_q->p_last)
-                p_q->p_last = p_prev + 1;
+            /* If we are removing the last guy in the queue, update _p_last */
+            if (p_buf == p_q->_p_last)
+                p_q->_p_last = p_prev + 1;
 
             /* One less in the queue */
-            p_q->count--;
+            p_q->_count--;
 
             /* The buffer is now unlinked */
             p_buf_hdr->p_next = NULL;
@@ -961,7 +959,7 @@ void *GKI_remove_from_queue (BUFFER_Q *p_q, void *p_buf)
 *******************************************************************************/
 void *GKI_getfirst (BUFFER_Q *p_q)
 {
-    return (p_q->p_first);
+    return (p_q->_p_first);
 }
 
@@ -978,7 +976,7 @@ void *GKI_getfirst (BUFFER_Q *p_q)
 *******************************************************************************/
 void *GKI_getlast (BUFFER_Q *p_q)
 {
-    return (p_q->p_last);
+    return (p_q->_p_last);
 }
 
 /*******************************************************************************
@@ -1019,7 +1017,12 @@ void *GKI_getnext (void *p_buf)
 *******************************************************************************/
 BOOLEAN GKI_queue_is_empty(BUFFER_Q *p_q)
 {
-    return ((BOOLEAN) (p_q->count == 0));
+    return ((BOOLEAN) (p_q->_count == 0));
+}
+
+UINT16 GKI_queue_length(BUFFER_Q *p_q)
+{
+    return p_q->_count;
 }
 
 /*******************************************************************************
@@ -1110,11 +1113,11 @@ void *GKI_igetpoolbuf (UINT8 pool_id)
     Q = &gki_cb.com.freeq[pool_id];
     if(Q->cur_cnt < Q->total)
     {
-        p_hdr = Q->p_first;
-        Q->p_first = p_hdr->p_next;
+        p_hdr = Q->_p_first;
+        Q->_p_first = p_hdr->p_next;
 
-        if (!Q->p_first)
-            Q->p_last = NULL;
+        if (!Q->_p_first)
+            Q->_p_last = NULL;
 
         if(++Q->cur_cnt > Q->max_cnt)
             Q->max_cnt = Q->cur_cnt;
diff --git a/gki/common/gki_common.h b/gki/common/gki_common.h
index 042024e1f..7b15009e0 100644
--- a/gki/common/gki_common.h
+++ b/gki/common/gki_common.h
@@ -58,7 +58,7 @@ typedef struct _buffer_hdr
 {
-    struct _buffer_hdr *p_next;   /* next buffer in the queue */
+    struct _buffer_hdr *p_next;     /* next buffer in the queue */
     UINT8   q_id;                 /* id of the queue */
     UINT8   task_id;              /* task which allocated the buffer*/
     UINT8   status;               /* FREE, UNLINKED or QUEUED */
@@ -67,12 +67,12 @@ typedef struct _buffer_hdr
 
 typedef struct _free_queue
 {
-    BUFFER_HDR_T *p_first;      /* first buffer in the queue */
-    BUFFER_HDR_T *p_last;       /* last buffer in the queue */
-    UINT16       size;          /* size of the buffers in the pool */
-    UINT16       total;         /* toatal number of buffers */
-    UINT16       cur_cnt;       /* number of buffers currently allocated */
-    UINT16       max_cnt;       /* maximum number of buffers allocated at any time */
+    BUFFER_HDR_T *_p_first;     /* first buffer in the queue */
+    BUFFER_HDR_T *_p_last;      /* last buffer in the queue */
+    UINT16       size;          /* size of the buffers in the pool */
+    UINT16       total;         /* toatal number of buffers */
+    UINT16       cur_cnt;       /* number of buffers currently allocated */
+    UINT16       max_cnt;       /* maximum number of buffers allocated at any time */
 } FREE_QUEUE_T;
diff --git a/gki/ulinux/gki_int.h b/gki/ulinux/gki_int.h
index ec9b1cbcd..364e83050 100644
--- a/gki/ulinux/gki_int.h
+++ b/gki/ulinux/gki_int.h
@@ -80,6 +80,9 @@ typedef struct
 extern "C" {
 #endif
 
+extern int acquire_wake_lock(int lock, const char* id);
+extern int release_wake_lock(const char* id);
+
 #if GKI_DYNAMIC_MEMORY == FALSE
 GKI_API extern tGKI_CB   gki_cb;
 #else
diff --git a/stack/avct/avct_lcb_act.c b/stack/avct/avct_lcb_act.c
index 17d5d3d30..4ca1745a1 100644
--- a/stack/avct/avct_lcb_act.c
+++ b/stack/avct/avct_lcb_act.c
@@ -618,7 +618,7 @@ void avct_lcb_send_msg(tAVCT_LCB *p_lcb, tAVCT_LCB_EVT *p_data)
             pkt_type = AVCT_PKT_TYPE_END;
         }
     }
-    AVCT_TRACE_DEBUG ("avct_lcb_send_msg tx_q_count:%d", p_lcb->tx_q.count);
+    AVCT_TRACE_DEBUG ("avct_lcb_send_msg tx_q_count:%d", GKI_queue_length(&p_lcb->tx_q));
     return;
 }
diff --git a/stack/avdt/avdt_scb_act.c b/stack/avdt/avdt_scb_act.c
index 72dd3cd46..8c46f7b69 100644
--- a/stack/avdt/avdt_scb_act.c
+++ b/stack/avdt/avdt_scb_act.c
@@ -1421,7 +1421,7 @@ void avdt_scb_snd_stream_close(tAVDT_SCB *p_scb, tAVDT_SCB_EVT *p_data)
     BT_HDR          *p_frag;
 
     AVDT_TRACE_WARNING("avdt_scb_snd_stream_close c:%d, off:%d",
-        p_scb->frag_q.count, p_scb->frag_off);
+        GKI_queue_length(&p_scb->frag_q), p_scb->frag_off);
     /* clean fragments queue */
     while((p_frag = (BT_HDR*)GKI_dequeue (&p_scb->frag_q)) != NULL)
         GKI_freebuf(p_frag);
diff --git a/stack/bnep/bnep_api.c b/stack/bnep/bnep_api.c
index b1d8c7c2d..87b2cb5cf 100644
--- a/stack/bnep/bnep_api.c
+++ b/stack/bnep/bnep_api.c
@@ -432,7 +432,7 @@ tBNEP_RESULT BNEP_WriteBuf (UINT16 handle,
     }
 
     /* Check transmit queue */
-    if (p_bcb->xmit_q.count >= BNEP_MAX_XMITQ_DEPTH)
+    if (GKI_queue_length(&p_bcb->xmit_q) >= BNEP_MAX_XMITQ_DEPTH)
     {
         GKI_freebuf (p_buf);
         return (BNEP_Q_SIZE_EXCEEDED);
@@ -538,7 +538,7 @@ tBNEP_RESULT BNEP_Write (UINT16 handle,
     }
 
     /* Check transmit queue */
-    if (p_bcb->xmit_q.count >= BNEP_MAX_XMITQ_DEPTH)
+    if (GKI_queue_length(&p_bcb->xmit_q) >= BNEP_MAX_XMITQ_DEPTH)
         return (BNEP_Q_SIZE_EXCEEDED);
 
     /* Get a buffer to copy teh data into */
@@ -762,7 +762,7 @@ tBNEP_RESULT BNEP_GetStatus (UINT16 handle, tBNEP_STATUS *p_status)
     p_status->con_status         = BNEP_STATUS_CONNECTED;
     p_status->l2cap_cid          = p_bcb->l2cap_cid;
     p_status->rem_mtu_size       = p_bcb->rem_mtu_size;
-    p_status->xmit_q_depth       = p_bcb->xmit_q.count;
+    p_status->xmit_q_depth       = GKI_queue_length(&p_bcb->xmit_q);
     p_status->sent_num_filters   = p_bcb->sent_num_filters;
     p_status->sent_mcast_filters = p_bcb->sent_mcast_filters;
     p_status->rcvd_num_filters   = p_bcb->rcvd_num_filters;
diff --git a/stack/bnep/bnep_utils.c b/stack/bnep/bnep_utils.c
index 92061d010..89c471cbc 100644
--- a/stack/bnep/bnep_utils.c
+++ b/stack/bnep/bnep_utils.c
@@ -152,7 +152,7 @@ void bnepu_release_bcb (tBNEP_CONN *p_bcb)
     p_bcb->p_pending_data = NULL;
 
     /* Free transmit queue */
-    while (p_bcb->xmit_q.count)
+    while (!GKI_queue_is_empty(&p_bcb->xmit_q))
     {
         GKI_freebuf (GKI_dequeue (&p_bcb->xmit_q));
     }
@@ -455,7 +455,7 @@ void bnepu_check_send_packet (tBNEP_CONN *p_bcb, BT_HDR *p_buf)
     BNEP_TRACE_EVENT ("BNEP - bnepu_check_send_packet for CID: 0x%x", p_bcb->l2cap_cid);
     if (p_bcb->con_flags & BNEP_FLAGS_L2CAP_CONGESTED)
     {
-        if (p_bcb->xmit_q.count >= BNEP_MAX_XMITQ_DEPTH)
+        if (GKI_queue_length(&p_bcb->xmit_q) >= BNEP_MAX_XMITQ_DEPTH)
         {
             BNEP_TRACE_EVENT ("BNEP - congested, dropping buf, CID: 0x%x", p_bcb->l2cap_cid);
diff --git a/stack/btm/btm_ble_bgconn.c b/stack/btm/btm_ble_bgconn.c
index b33aa2e2c..2fabe23d6 100644
--- a/stack/btm/btm_ble_bgconn.c
+++ b/stack/btm/btm_ble_bgconn.c
@@ -705,7 +705,7 @@ BOOLEAN btm_send_pending_direct_conn(void )
     tBTM_BLE_CONN_REQ   *p_req;
     BOOLEAN     rt = FALSE;
 
-    if ( btm_cb.ble_ctr_cb.conn_pending_q.count )
+    if (!GKI_queue_is_empty(&btm_cb.ble_ctr_cb.conn_pending_q))
     {
         p_req = (tBTM_BLE_CONN_REQ*)GKI_dequeue (&btm_cb.ble_ctr_cb.conn_pending_q);
diff --git a/stack/btm/btm_pm.c b/stack/btm/btm_pm.c
index 76bfc048b..446c88dfc 100644
--- a/stack/btm/btm_pm.c
+++ b/stack/btm/btm_pm.c
@@ -1053,7 +1053,7 @@ BOOLEAN btm_pm_device_in_scan_state(void)
     /* Scan state-paging, inquiry, and trying to connect */
 
     /* Check for paging */
-    if (btm_cb.is_paging || btm_cb.page_queue.count > 0 ||
+    if (btm_cb.is_paging || GKI_queue_length(&btm_cb.page_queue) > 0 ||
         BTM_BL_PAGING_STARTED == btm_cb.busy_level)
     {
         BTM_TRACE_DEBUG("btm_pm_device_in_scan_state- paging");
diff --git a/stack/btu/btu_hcif.c b/stack/btu/btu_hcif.c
index 9ec8dd3cb..aea9800fc 100644
--- a/stack/btu/btu_hcif.c
+++ b/stack/btu/btu_hcif.c
@@ -460,7 +460,7 @@ void btu_hcif_send_cmd (UINT8 controller_id, BT_HDR *p_buf)
 #endif
 
     /* If there are already commands in the queue, then enqueue this command */
-    if ((p_buf) && (p_hci_cmd_cb->cmd_xmit_q.count))
+    if ((p_buf) && (!GKI_queue_is_empty(&p_hci_cmd_cb->cmd_xmit_q)))
     {
         GKI_enqueue (&(p_hci_cmd_cb->cmd_xmit_q), p_buf);
         p_buf = NULL;
@@ -471,7 +471,7 @@ void btu_hcif_send_cmd (UINT8 controller_id, BT_HDR *p_buf)
         && (p_hci_cmd_cb->cmd_window == 0) && (btm_cb.devcb.state == BTM_DEV_STATE_WAIT_RESET_CMPLT)) )
     {
-        p_hci_cmd_cb->cmd_window = p_hci_cmd_cb->cmd_xmit_q.count + 1;
+        p_hci_cmd_cb->cmd_window = GKI_queue_length(&p_hci_cmd_cb->cmd_xmit_q) + 1;
     }
 
     /* See if we can send anything */
diff --git a/stack/btu/btu_task.c b/stack/btu/btu_task.c
index 8d1fd04df..9045dac7e 100644
--- a/stack/btu/btu_task.c
+++ b/stack/btu/btu_task.c
@@ -921,8 +921,8 @@ void btu_stop_timer_oneshot(TIMER_LIST_ENT *p_tle) {
 *******************************************************************************/
 void btu_check_bt_sleep (void)
 {
-    if ((btu_cb.hci_cmd_cb[LOCAL_BR_EDR_CONTROLLER_ID].cmd_cmpl_q.count == 0)
-        &&(btu_cb.hci_cmd_cb[LOCAL_BR_EDR_CONTROLLER_ID].cmd_xmit_q.count == 0))
+    if ((GKI_queue_is_empty(&btu_cb.hci_cmd_cb[LOCAL_BR_EDR_CONTROLLER_ID].cmd_cmpl_q)
+        && GKI_queue_is_empty(&btu_cb.hci_cmd_cb[LOCAL_BR_EDR_CONTROLLER_ID].cmd_xmit_q)))
     {
         if (l2cb.controller_xmit_window == l2cb.num_lm_acl_bufs)
         {
diff --git a/stack/gap/gap_conn.c b/stack/gap/gap_conn.c
index d1e96d88b..61f88928b 100644
--- a/stack/gap/gap_conn.c
+++ b/stack/gap/gap_conn.c
@@ -1173,10 +1173,10 @@ static void gap_release_ccb (tGAP_CCB *p_ccb)
 
     /* Drop any buffers we may be holding */
     p_ccb->rx_queue_size = 0;
 
-    while (p_ccb->rx_queue.p_first)
+    while (!GKI_queue_is_empty(&p_ccb->rx_queue))
         GKI_freebuf (GKI_dequeue (&p_ccb->rx_queue));
 
-    while (p_ccb->tx_queue.p_first)
+    while (!GKI_queue_is_empty(&p_ccb->tx_queue))
         GKI_freebuf (GKI_dequeue (&p_ccb->tx_queue));
 
     p_ccb->con_state = GAP_CCB_STATE_IDLE;
diff --git a/stack/gatt/gatt_auth.c b/stack/gatt/gatt_auth.c
index 10cf76e88..ae5214592 100644
--- a/stack/gatt/gatt_auth.c
+++ b/stack/gatt/gatt_auth.c
@@ -192,7 +192,7 @@ void gatt_enc_cmpl_cback(BD_ADDR bd_addr, tBT_TRANSPORT transport, void *p_ref_d
         gatt_sec_check_complete(status , p_buf->p_clcb, p_tcb->sec_act);
         GKI_freebuf(p_buf);
         /* start all other pending operation in queue */
-        count = p_tcb->pending_enc_clcb.count;
+        count = GKI_queue_length(&p_tcb->pending_enc_clcb);
         for (; count > 0; count --)
         {
             if ((p_buf = (tGATT_PENDING_ENC_CLCB *)GKI_dequeue (&p_tcb->pending_enc_clcb)) != NULL)
@@ -246,7 +246,7 @@ void gatt_notify_enc_cmpl(BD_ADDR bd_addr)
     {
         gatt_set_sec_act(p_tcb, GATT_SEC_NONE);
 
-        count = p_tcb->pending_enc_clcb.count;
+        count = GKI_queue_length(&p_tcb->pending_enc_clcb);
 
         for (; count > 0; count --)
         {
diff --git a/stack/gatt/gatt_db.c b/stack/gatt/gatt_db.c
index b7887c975..b12ff5235 100644
--- a/stack/gatt/gatt_db.c
+++ b/stack/gatt/gatt_db.c
@@ -62,6 +62,8 @@ static tGATT_STATUS gatts_send_app_read_request(tGATT_TCB *p_tcb, UINT8 op_code,
 BOOLEAN gatts_init_service_db (tGATT_SVC_DB *p_db, tBT_UUID *p_service,  BOOLEAN is_pri,
                                UINT16 s_hdl, UINT16 num_handle)
 {
+    GKI_init_q(&p_db->svc_buffer);
+
     if (!allocate_svc_db_buf(p_db))
     {
         GATT_TRACE_ERROR("gatts_init_service_db failed, no resources");
diff --git a/stack/gatt/gatt_main.c b/stack/gatt/gatt_main.c
index 3d9603514..f0a0229ec 100644
--- a/stack/gatt/gatt_main.c
+++ b/stack/gatt/gatt_main.c
@@ -102,6 +102,8 @@ void gatt_init (void)
 #endif
     gatt_cb.def_mtu_size = GATT_DEF_BLE_MTU_SIZE;
     GKI_init_q (&gatt_cb.sign_op_queue);
+    GKI_init_q (&gatt_cb.srv_chg_clt_q);
+    GKI_init_q (&gatt_cb.pending_new_srv_start_q);
     /* First, register fixed L2CAP channel for ATT over BLE */
     fixed_reg.fixed_chnl_opts.mode         = L2CAP_FCR_BASIC_MODE;
     fixed_reg.fixed_chnl_opts.max_transmit = 0xFF;
diff --git a/stack/gatt/gatt_sr.c b/stack/gatt/gatt_sr.c
index 21997946b..5f9ddf01b 100755
--- a/stack/gatt/gatt_sr.c
+++ b/stack/gatt/gatt_sr.c
@@ -106,7 +106,7 @@ void gatt_dequeue_sr_cmd (tGATT_TCB *p_tcb)
         GKI_freebuf (p_tcb->sr_cmd.p_rsp_msg);
     }
 
-    while (p_tcb->sr_cmd.multi_rsp_q.p_first)
+    while (GKI_getfirst(&p_tcb->sr_cmd.multi_rsp_q))
         GKI_freebuf (GKI_dequeue (&p_tcb->sr_cmd.multi_rsp_q));
     memset( &p_tcb->sr_cmd, 0, sizeof(tGATT_SR_CMD));
 }
@@ -145,9 +145,9 @@ static BOOLEAN process_read_multi_rsp (tGATT_SR_CMD *p_cmd, tGATT_STATUS status,
     if (status == GATT_SUCCESS)
     {
         GATT_TRACE_DEBUG ("Multi read count=%d num_hdls=%d",
-                           p_cmd->multi_rsp_q.count, p_cmd->multi_req.num_handles);
+                           GKI_queue_length(&p_cmd->multi_rsp_q), p_cmd->multi_req.num_handles);
         /* Wait till we get all the responses */
-        if (p_cmd->multi_rsp_q.count == p_cmd->multi_req.num_handles)
+        if (GKI_queue_length(&p_cmd->multi_rsp_q) == p_cmd->multi_req.num_handles)
         {
             len = sizeof(BT_HDR) + L2CAP_MIN_OFFSET + mtu;
             if ((p_buf = (BT_HDR *)GKI_getbuf(len)) == NULL)
diff --git a/stack/gatt/gatt_utils.c b/stack/gatt/gatt_utils.c
index 0e841a92c..f0658ea2a 100644
--- a/stack/gatt/gatt_utils.c
+++ b/stack/gatt/gatt_utils.c
@@ -93,7 +93,7 @@ void gatt_free_pending_ind(tGATT_TCB *p_tcb)
 {
     GATT_TRACE_DEBUG("gatt_free_pending_ind");
     /* release all queued indications */
-    while (p_tcb->pending_ind_q.p_first)
+    while (!GKI_queue_is_empty(&p_tcb->pending_ind_q))
         GKI_freebuf (GKI_dequeue (&p_tcb->pending_ind_q));
 }
 
@@ -110,7 +110,7 @@ void gatt_free_pending_enc_queue(tGATT_TCB *p_tcb)
 {
     GATT_TRACE_DEBUG("gatt_free_pending_enc_queue");
     /* release all queued indications */
-    while (p_tcb->pending_enc_clcb.p_first)
+    while (!GKI_queue_is_empty(&p_tcb->pending_enc_clcb))
         GKI_freebuf (GKI_dequeue (&p_tcb->pending_enc_clcb));
 }
 
@@ -373,7 +373,7 @@ void gatt_free_hdl_buffer(tGATT_HDL_LIST_ELEM *p)
     if (p)
     {
-        while (p->svc_db.svc_buffer.p_first)
+        while (!GKI_queue_is_empty(&p->svc_db.svc_buffer))
            GKI_freebuf (GKI_dequeue (&p->svc_db.svc_buffer));
        memset(p, 0, sizeof(tGATT_HDL_LIST_ELEM));
     }
@@ -397,7 +397,7 @@ void gatt_free_srvc_db_buffer_app_id(tBT_UUID *p_app_id)
     {
         if (memcmp(p_app_id, &p_elem->asgn_range.app_uuid128, sizeof(tBT_UUID)) == 0)
         {
-            while (p_elem->svc_db.svc_buffer.p_first)
+            while (!GKI_queue_is_empty(&p_elem->svc_db.svc_buffer))
                 GKI_freebuf (GKI_dequeue (&p_elem->svc_db.svc_buffer));
 
             p_elem->svc_db.mem_free = 0;
@@ -1363,7 +1363,7 @@ UINT8 gatt_sr_alloc_rcb(tGATT_HDL_LIST_ELEM *p_list )
             p_sreg->e_hdl       = p_list->asgn_range.e_handle;
             p_sreg->p_db        = &p_list->svc_db;
 
-            GATT_TRACE_DEBUG ("total GKI buffer in db [%d]",p_sreg->p_db->svc_buffer.count);
+            GATT_TRACE_DEBUG ("total GKI buffer in db [%d]",GKI_queue_length(&p_sreg->p_db->svc_buffer));
             break;
         }
     }
diff --git a/stack/l2cap/l2c_api.c b/stack/l2cap/l2c_api.c
index 3d228d8c0..be7829ce8 100644
--- a/stack/l2cap/l2c_api.c
+++ b/stack/l2cap/l2c_api.c
@@ -1484,7 +1484,7 @@ UINT16 L2CA_SendFixedChnlData (UINT16 fixed_cid, BD_ADDR rem_bda, BT_HDR *p_buf)
     {
         L2CAP_TRACE_ERROR ("L2CAP - CID: 0x%04x cannot send, already congested \
            xmit_hold_q.count: %u  buff_quota: %u", fixed_cid,
-            p_lcb->p_fixed_ccbs[fixed_cid - L2CAP_FIRST_FIXED_CHNL]->xmit_hold_q.count,
+            GKI_queue_length(&p_lcb->p_fixed_ccbs[fixed_cid - L2CAP_FIRST_FIXED_CHNL]->xmit_hold_q),
             p_lcb->p_fixed_ccbs[fixed_cid - L2CAP_FIRST_FIXED_CHNL]->buff_quota);
         GKI_freebuf (p_buf);
         return (L2CAP_DW_FAILED);
@@ -1810,7 +1810,7 @@ UINT16 L2CA_FlushChannel (UINT16 lcid, UINT16 num_to_flush)
     if (num_to_flush != L2CAP_FLUSH_CHANS_GET)
     {
         L2CAP_TRACE_API ("L2CA_FlushChannel (FLUSH)  CID: 0x%04x  NumToFlush: %d  QC: %u  pFirst: 0x%08x",
-                         lcid, num_to_flush, p_ccb->xmit_hold_q.count, p_ccb->xmit_hold_q.p_first);
+                         lcid, num_to_flush, GKI_queue_length(&p_ccb->xmit_hold_q), GKI_getfirst(&p_ccb->xmit_hold_q));
     }
     else
     {
@@ -1838,7 +1838,7 @@ UINT16 L2CA_FlushChannel (UINT16 lcid, UINT16 num_to_flush)
     }
 #endif
 
-    p_buf = (BT_HDR *)p_lcb->link_xmit_data_q.p_first;
+    p_buf = (BT_HDR *)GKI_getfirst(&p_lcb->link_xmit_data_q);
 
     /* First flush the number we are asked to flush */
     while ((p_buf != NULL) && (num_to_flush != 0))
@@ -1860,7 +1860,7 @@ UINT16 L2CA_FlushChannel (UINT16 lcid, UINT16 num_to_flush)
     }
 
     /* If needed, flush buffers in the CCB xmit hold queue */
-    while ( (num_to_flush != 0) && (p_ccb->xmit_hold_q.count != 0) )
+    while ( (num_to_flush != 0) && (!GKI_queue_is_empty(&p_ccb->xmit_hold_q)))
     {
         p_buf = (BT_HDR *)GKI_dequeue (&p_ccb->xmit_hold_q);
         if (p_buf)
@@ -1874,7 +1874,7 @@ UINT16 L2CA_FlushChannel (UINT16 lcid, UINT16 num_to_flush)
         (*p_ccb->p_rcb->api.pL2CA_TxComplete_Cb)(p_ccb->local_cid, num_flushed2);
 
     /* Now count how many are left */
-    p_buf = (BT_HDR *)p_lcb->link_xmit_data_q.p_first;
+    p_buf = (BT_HDR *)GKI_getfirst(&p_lcb->link_xmit_data_q);
 
     while (p_buf != NULL)
     {
@@ -1885,7 +1885,7 @@ UINT16 L2CA_FlushChannel (UINT16 lcid, UINT16 num_to_flush)
     }
 
     /* Add in the number in the CCB xmit queue */
-    num_left += p_ccb->xmit_hold_q.count;
+    num_left += GKI_queue_length(&p_ccb->xmit_hold_q);
 
     /* Return the local number of buffers left for the CID */
     L2CAP_TRACE_DEBUG ("L2CA_FlushChannel()  flushed: %u + %u,  num_left: %u", num_flushed1, num_flushed2, num_left);
diff --git a/stack/l2cap/l2c_ble.c b/stack/l2cap/l2c_ble.c
index d3ba6e229..f9c75e2c6 100644
--- a/stack/l2cap/l2c_ble.c
+++ b/stack/l2cap/l2c_ble.c
@@ -915,7 +915,7 @@ void l2c_ble_link_adjust_allocation (void)
         /* this link may have sent anything but some other link sent packets so */
         /* so we may need a timer to kick off this link's transmissions. */
         if ( (p_lcb->link_state == LST_CONNECTED)
-          && (p_lcb->link_xmit_data_q.count)
+          && (GKI_queue_length(&p_lcb->link_xmit_data_q))
          && (p_lcb->sent_not_acked < p_lcb->link_xmit_quota) )
             btu_start_timer (&p_lcb->timer_entry, BTU_TTYPE_L2CAP_LINK, L2CAP_LINK_FLOW_CONTROL_TOUT);
     }
diff --git a/stack/l2cap/l2c_csm.c b/stack/l2cap/l2c_csm.c
index fa261b211..5bf268fc6 100644
--- a/stack/l2cap/l2c_csm.c
+++ b/stack/l2cap/l2c_csm.c
@@ -789,7 +789,7 @@ static void l2c_csm_config (tL2C_CCB *p_ccb, UINT16 event, void *p_data)
             p_ccb->fcrb.connect_tick_count = GKI_get_os_tick_count();
 #endif
             /* See if we can forward anything on the hold queue */
-            if (p_ccb->xmit_hold_q.count)
+            if (!GKI_queue_is_empty(&p_ccb->xmit_hold_q))
             {
                 l2c_link_check_send_pkts (p_ccb->p_lcb, NULL, NULL);
             }
@@ -872,7 +872,7 @@ static void l2c_csm_config (tL2C_CCB *p_ccb, UINT16 event, void *p_data)
 #endif
 
             /* See if we can forward anything on the hold queue */
-            if ( (p_ccb->chnl_state == CST_OPEN) && (p_ccb->xmit_hold_q.count) )
+            if ( (p_ccb->chnl_state == CST_OPEN) && (!GKI_queue_is_empty(&p_ccb->xmit_hold_q)))
             {
                 l2c_link_check_send_pkts (p_ccb->p_lcb, NULL, NULL);
             }
diff --git a/stack/l2cap/l2c_fcr.c b/stack/l2cap/l2c_fcr.c
index 6e12607cd..334730d46 100644
--- a/stack/l2cap/l2c_fcr.c
+++ b/stack/l2cap/l2c_fcr.c
@@ -233,13 +233,13 @@ void l2c_fcr_cleanup (tL2C_CCB *p_ccb)
     if (p_fcrb->p_rx_sdu)
         GKI_freebuf (p_fcrb->p_rx_sdu);
 
-    while (p_fcrb->waiting_for_ack_q.p_first)
+    while (!GKI_queue_is_empty(&p_fcrb->waiting_for_ack_q))
         GKI_freebuf (GKI_dequeue (&p_fcrb->waiting_for_ack_q));
 
-    while (p_fcrb->srej_rcv_hold_q.p_first)
+    while (!GKI_queue_is_empty(&p_fcrb->srej_rcv_hold_q))
         GKI_freebuf (GKI_dequeue (&p_fcrb->srej_rcv_hold_q));
 
-    while (p_fcrb->retrans_q.p_first)
+    while (!GKI_queue_is_empty(&p_fcrb->retrans_q))
         GKI_freebuf (GKI_dequeue (&p_fcrb->retrans_q));
 
     btu_stop_quick_timer (&p_fcrb->ack_timer);
@@ -390,10 +390,10 @@ BOOLEAN l2c_fcr_is_flow_controlled (tL2C_CCB *p_ccb)
     {
         /* Check if remote side flowed us off or the transmit window is full */
         if ( (p_ccb->fcrb.remote_busy == TRUE)
-         ||  (p_ccb->fcrb.waiting_for_ack_q.count >= p_ccb->peer_cfg.fcr.tx_win_sz) )
+         ||  (GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q) >= p_ccb->peer_cfg.fcr.tx_win_sz) )
         {
 #if (L2CAP_ERTM_STATS == TRUE)
-            if (p_ccb->xmit_hold_q.count != 0)
+            if (!GKI_queue_is_empty(&p_ccb->xmit_hold_q))
             {
                 p_ccb->fcrb.xmit_window_closed++;
@@ -699,7 +699,7 @@ void l2c_fcr_proc_pdu (tL2C_CCB *p_ccb, BT_HDR *p_buf)
     L2CAP_TRACE_EVENT ("    eRTM Rx Nxt_tx_seq %u, Lst_rx_ack %u, Nxt_seq_exp %u, Lst_ack_snt %u, wt_q.cnt %u, tries %u",
                        p_ccb->fcrb.next_tx_seq, p_ccb->fcrb.last_rx_ack, p_ccb->fcrb.next_seq_expected,
-                       p_ccb->fcrb.last_ack_sent, p_ccb->fcrb.waiting_for_ack_q.count, p_ccb->fcrb.num_tries);
+                       p_ccb->fcrb.last_ack_sent, GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q), p_ccb->fcrb.num_tries);
 
 #endif /* BT_TRACE_VERBOSE */
 
@@ -768,7 +768,7 @@ void l2c_fcr_proc_pdu (tL2C_CCB *p_ccb, BT_HDR *p_buf)
             if (ctrl_word & L2CAP_FCR_S_FRAME_BIT)
                 ctrl_word &= ~L2CAP_FCR_P_BIT;
 
-            if (p_ccb->fcrb.waiting_for_ack_q.count == 0)
+            if (GKI_queue_is_empty(&p_ccb->fcrb.waiting_for_ack_q))
                 p_ccb->fcrb.num_tries = 0;
 
             l2c_fcr_stop_timer (p_ccb);
@@ -797,7 +797,7 @@ void l2c_fcr_proc_pdu (tL2C_CCB *p_ccb, BT_HDR *p_buf)
             return;
 
     /* If we have some buffers held while doing SREJ, and SREJ has cleared, process them now */
-    if ( (!p_ccb->fcrb.local_busy) && (!p_ccb->fcrb.srej_sent) && (p_ccb->fcrb.srej_rcv_hold_q.count > 0) )
+    if ( (!p_ccb->fcrb.local_busy) && (!p_ccb->fcrb.srej_sent) && (!GKI_queue_is_empty(&p_ccb->fcrb.srej_rcv_hold_q)))
     {
         BUFFER_Q temp_q = p_ccb->fcrb.srej_rcv_hold_q;
@@ -845,7 +845,7 @@ void l2c_fcr_proc_pdu (tL2C_CCB *p_ccb, BT_HDR *p_buf)
     }
 
     /* If a window has opened, check if we can send any more packets */
-    if ( (p_ccb->fcrb.retrans_q.count || p_ccb->xmit_hold_q.count)
+    if ( (!GKI_queue_is_empty(&p_ccb->fcrb.retrans_q) || !GKI_queue_is_empty(&p_ccb->xmit_hold_q))
      && (p_ccb->fcrb.wait_ack == FALSE)
      && (l2c_fcr_is_flow_controlled (p_ccb) == FALSE) )
     {
@@ -866,7 +866,7 @@ void l2c_fcr_proc_tout (tL2C_CCB *p_ccb)
 {
     L2CAP_TRACE_DEBUG ("l2c_fcr_proc_tout:  CID: 0x%04x  num_tries: %u (max: %u)  wait_ack: %u  ack_q_count: %u",
                        p_ccb->local_cid, p_ccb->fcrb.num_tries, p_ccb->peer_cfg.fcr.max_transmit,
-                       p_ccb->fcrb.wait_ack, p_ccb->fcrb.waiting_for_ack_q.count);
+                       p_ccb->fcrb.wait_ack, GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q));
 
 #if (L2CAP_ERTM_STATS == TRUE)
     p_ccb->fcrb.retrans_touts++;
@@ -939,7 +939,7 @@ static BOOLEAN process_reqseq (tL2C_CCB *p_ccb, UINT16 ctrl_word)
      && ((ctrl_word & L2CAP_FCR_P_BIT) == 0) )
     {
         /* If anything still waiting for ack, restart the timer if it was stopped */
-        if (p_fcrb->waiting_for_ack_q.count)
+        if (!GKI_queue_is_empty(&p_fcrb->waiting_for_ack_q))
             l2c_fcr_start_timer (p_ccb);
 
         return (TRUE);
@@ -951,11 +951,11 @@ static BOOLEAN process_reqseq (tL2C_CCB *p_ccb, UINT16 ctrl_word)
     num_bufs_acked = (req_seq - p_fcrb->last_rx_ack) & L2CAP_FCR_SEQ_MODULO;
 
     /* Verify the request sequence is in range before proceeding */
-    if (num_bufs_acked > p_fcrb->waiting_for_ack_q.count)
+    if (num_bufs_acked > GKI_queue_length(&p_fcrb->waiting_for_ack_q))
     {
         /* The channel is closed if ReqSeq is not in range */
         L2CAP_TRACE_WARNING ("L2CAP eRTM Frame BAD Req_Seq - ctrl_word: 0x%04x  req_seq 0x%02x  last_rx_ack: 0x%02x  QCount: %u",
-                              ctrl_word, req_seq, p_fcrb->last_rx_ack, p_fcrb->waiting_for_ack_q.count);
+                              ctrl_word, req_seq, p_fcrb->last_rx_ack, GKI_queue_length(&p_fcrb->waiting_for_ack_q));
 
         l2cu_disconnect_chnl (p_ccb);
         return (FALSE);
@@ -979,7 +979,7 @@ static BOOLEAN process_reqseq (tL2C_CCB *p_ccb, UINT16 ctrl_word)
 
     for (xx = 0; xx < num_bufs_acked; xx++)
    {
-        ls = ((BT_HDR *)(p_fcrb->waiting_for_ack_q.p_first))->layer_specific & L2CAP_FCR_SAR_BITS;
+        ls = ((BT_HDR *)(GKI_getfirst(&p_fcrb->waiting_for_ack_q)))->layer_specific & L2CAP_FCR_SAR_BITS;
 
         if ( (ls == L2CAP_FCR_UNSEG_SDU) || (ls == L2CAP_FCR_END_SDU) )
             full_sdus_xmitted++;
@@ -995,7 +995,7 @@ static BOOLEAN process_reqseq (tL2C_CCB *p_ccb, UINT16 ctrl_word)
     if ( (p_ccb->p_rcb) && (p_ccb->p_rcb->api.pL2CA_TxComplete_Cb) && (full_sdus_xmitted) )
     {
         /* Special case for eRTM, if all packets sent, send 0xFFFF */
-        if ( (p_fcrb->waiting_for_ack_q.count == 0) && (p_ccb->xmit_hold_q.count == 0) )
+        if (GKI_queue_is_empty(&p_fcrb->waiting_for_ack_q) && (GKI_queue_is_empty(&p_ccb->xmit_hold_q)))
             full_sdus_xmitted = 0xFFFF;
 
         (*p_ccb->p_rcb->api.pL2CA_TxComplete_Cb)(p_ccb->local_cid, full_sdus_xmitted);
@@ -1003,7 +1003,7 @@ static BOOLEAN process_reqseq (tL2C_CCB *p_ccb, UINT16 ctrl_word)
     }
 
     /* If anything still waiting for ack, restart the timer if it was stopped */
-    if (p_fcrb->waiting_for_ack_q.count)
+    if (!GKI_queue_is_empty(&p_fcrb->waiting_for_ack_q))
         l2c_fcr_start_timer (p_ccb);
 
     return (TRUE);
@@ -1165,9 +1165,9 @@ static void process_i_frame (tL2C_CCB *p_ccb, BT_HDR *p_buf, UINT16 ctrl_word, B
         if (p_fcrb->srej_sent)
         {
             /* If SREJ sent, save the frame for later processing as long as it is in sequence */
-            next_srej = (((BT_HDR *)p_fcrb->srej_rcv_hold_q.p_last)->layer_specific + 1) & L2CAP_FCR_SEQ_MODULO;
+            next_srej = (((BT_HDR *)GKI_getlast(&p_fcrb->srej_rcv_hold_q))->layer_specific + 1) & L2CAP_FCR_SEQ_MODULO;
 
-            if ( (tx_seq == next_srej) && (p_fcrb->srej_rcv_hold_q.count < p_ccb->our_cfg.fcr.tx_win_sz) )
+            if ( (tx_seq == next_srej) && (GKI_queue_length(&p_fcrb->srej_rcv_hold_q) < p_ccb->our_cfg.fcr.tx_win_sz) )
             {
                 /* If user gave us a pool for held rx buffers, use that */
                 if (p_ccb->ertm_info.fcr_rx_pool_id != HCI_ACL_POOL_ID)
@@ -1197,7 +1197,7 @@ static void process_i_frame (tL2C_CCB *p_ccb, BT_HDR *p_buf, UINT16 ctrl_word, B
             else
             {
                 L2CAP_TRACE_WARNING ("process_i_frame() CID: 0x%04x  frame dropped in Srej Sent next_srej:%u  hold_q.count:%u  win_sz:%u",
-                                      p_ccb->local_cid, next_srej, p_fcrb->srej_rcv_hold_q.count, p_ccb->our_cfg.fcr.tx_win_sz);
+                                      p_ccb->local_cid, next_srej, GKI_queue_length(&p_fcrb->srej_rcv_hold_q), p_ccb->our_cfg.fcr.tx_win_sz);
 
                 p_fcrb->rej_after_srej = TRUE;
                 GKI_freebuf (p_buf);
@@ -1225,10 +1225,10 @@ static void process_i_frame (tL2C_CCB *p_ccb, BT_HDR *p_buf, UINT16 ctrl_word, B
             }
             else
             {
-                if (p_fcrb->srej_rcv_hold_q.count != 0)
+                if (!GKI_queue_is_empty(&p_fcrb->srej_rcv_hold_q))
                 {
                     L2CAP_TRACE_ERROR ("process_i_frame() CID: 0x%04x  sending SREJ tx_seq:%d  hold_q.count:%u",
-                                        p_ccb->local_cid, tx_seq, p_fcrb->srej_rcv_hold_q.count);
+                                        p_ccb->local_cid, tx_seq, GKI_queue_length(&p_fcrb->srej_rcv_hold_q));
                 }
                 p_buf->layer_specific = tx_seq;
                 GKI_enqueue (&p_fcrb->srej_rcv_hold_q, p_buf);
@@ -1275,8 +1275,8 @@ static void process_i_frame (tL2C_CCB *p_ccb, BT_HDR *p_buf, UINT16 ctrl_word, B
                                      (L2CAP_FCR_ACK_TOUT*QUICK_TIMER_TICKS_PER_SEC)/1000);
             }
         }
-        else if ( ((p_ccb->xmit_hold_q.count == 0) || (l2c_fcr_is_flow_controlled (p_ccb)))
-               && (p_ccb->fcrb.srej_rcv_hold_q.count == 0) )
+        else if ( ((GKI_queue_is_empty(&p_ccb->xmit_hold_q)) || (l2c_fcr_is_flow_controlled (p_ccb)))
+               && (GKI_queue_is_empty(&p_ccb->fcrb.srej_rcv_hold_q)))
         {
             if (p_fcrb->local_busy)
                 l2c_fcr_send_S_frame (p_ccb, L2CAP_FCR_SUP_RNR, 0);
@@ -1517,13 +1517,13 @@ static BOOLEAN retransmit_i_frames (tL2C_CCB *p_ccb, UINT8 tx_seq)
     UINT8       buf_seq;
     UINT16      ctrl_word;
 
-    if ( (p_ccb->fcrb.waiting_for_ack_q.p_first)
+    if ( (GKI_getfirst(&p_ccb->fcrb.waiting_for_ack_q))
      &&  (p_ccb->peer_cfg.fcr.max_transmit != 0)
     &&  (p_ccb->fcrb.num_tries >= p_ccb->peer_cfg.fcr.max_transmit) )
     {
         L2CAP_TRACE_EVENT ("Max Tries Exceeded:  (last_acq: %d  CID: 0x%04x  num_tries: %u (max: %u) ack_q_count: %u",
                            p_ccb->fcrb.last_rx_ack, p_ccb->local_cid, p_ccb->fcrb.num_tries, p_ccb->peer_cfg.fcr.max_transmit,
-                           p_ccb->fcrb.waiting_for_ack_q.count);
+                           GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q));
 
         l2cu_disconnect_chnl (p_ccb);
         return (FALSE);
@@ -1534,7 +1534,7 @@ static BOOLEAN retransmit_i_frames (tL2C_CCB *p_ccb, UINT8 tx_seq)
     {
         /* If sending only one, the sequence number tells us which one. Look for it. */
-        for (p_buf = (BT_HDR *)p_ccb->fcrb.waiting_for_ack_q.p_first; p_buf; p_buf = (BT_HDR *)GKI_getnext (p_buf))
+        for (p_buf = (BT_HDR *)GKI_getfirst(&p_ccb->fcrb.waiting_for_ack_q); p_buf; p_buf = (BT_HDR *)GKI_getnext (p_buf))
         {
             /* Get the old control word */
             p = ((UINT8 *) (p_buf+1)) + p_buf->offset + L2CAP_PKT_OVERHEAD;
@@ -1551,7 +1551,7 @@ static BOOLEAN retransmit_i_frames (tL2C_CCB *p_ccb, UINT8 tx_seq)
 
         if (!p_buf)
         {
-            L2CAP_TRACE_ERROR ("retransmit_i_frames() UNKNOWN seq: %u  q_count: %u", tx_seq, p_ccb->fcrb.waiting_for_ack_q.count);
+            L2CAP_TRACE_ERROR ("retransmit_i_frames() UNKNOWN seq: %u  q_count: %u", tx_seq, GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q));
             return (TRUE);
         }
     }
@@ -1559,7 +1559,7 @@ static BOOLEAN retransmit_i_frames (tL2C_CCB *p_ccb, UINT8 tx_seq)
     {
         /* Retransmitting everything. Flush buffers we already put in the link xmit queue. */
-        p_buf = (BT_HDR *)p_ccb->p_lcb->link_xmit_data_q.p_first;
+        p_buf = (BT_HDR *)GKI_getfirst(&p_ccb->p_lcb->link_xmit_data_q);
 
         while (p_buf != NULL)
         {
@@ -1577,10 +1577,10 @@ static BOOLEAN retransmit_i_frames (tL2C_CCB *p_ccb, UINT8 tx_seq)
         }
 
         /* Also flush our retransmission queue */
-        while (p_ccb->fcrb.retrans_q.p_first)
+        while (!GKI_queue_is_empty(&p_ccb->fcrb.retrans_q))
             GKI_freebuf (GKI_dequeue (&p_ccb->fcrb.retrans_q));
 
-        p_buf = (BT_HDR *)p_ccb->fcrb.waiting_for_ack_q.p_first;
+        p_buf = (BT_HDR *)GKI_getfirst(&p_ccb->fcrb.waiting_for_ack_q);
     }
 
     while (p_buf != NULL)
@@ -1602,7 +1602,7 @@ static BOOLEAN retransmit_i_frames (tL2C_CCB *p_ccb, UINT8 tx_seq)
 
     l2c_link_check_send_pkts (p_ccb->p_lcb, NULL, NULL);
 
-    if (p_ccb->fcrb.waiting_for_ack_q.count)
+    if (GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q))
     {
         p_ccb->fcrb.num_tries++;
         l2c_fcr_start_timer (p_ccb);
@@ -1633,7 +1633,7 @@ BT_HDR *l2c_fcr_get_next_xmit_sdu_seg (tL2C_CCB *p_ccb, UINT16 max_packet_length
 
     /* If there is anything in the retransmit queue, that goes first
     */
-    if (p_ccb->fcrb.retrans_q.p_first)
+    if (GKI_getfirst(&p_ccb->fcrb.retrans_q))
     {
         p_buf = (BT_HDR *)GKI_dequeue (&p_ccb->fcrb.retrans_q);
 
@@ -1668,7 +1668,7 @@ BT_HDR *l2c_fcr_get_next_xmit_sdu_seg (tL2C_CCB *p_ccb, UINT16 max_packet_length
             max_pdu = max_packet_length - L2CAP_MAX_HEADER_FCS;
         }
 
-        p_buf = (BT_HDR *)p_ccb->xmit_hold_q.p_first;
+        p_buf = (BT_HDR *)GKI_getfirst(&p_ccb->xmit_hold_q);
 
         /* If there is more data than the MPS, it requires segmentation */
         if (p_buf->len > max_pdu)
diff --git a/stack/l2cap/l2c_link.c b/stack/l2cap/l2c_link.c
index 240ead3dc..55978a236 100644
--- a/stack/l2cap/l2c_link.c
+++ b/stack/l2cap/l2c_link.c
@@ -803,7 +803,7 @@ void l2c_link_adjust_allocation (void)
         /* this link may have sent anything but some other link sent packets so */
         /* so we may need a timer to kick off this link's transmissions. */
         if ( (p_lcb->link_state == LST_CONNECTED)
-          && (p_lcb->link_xmit_data_q.count)
+          && (!GKI_queue_is_empty(&p_lcb->link_xmit_data_q))
          && (p_lcb->sent_not_acked < p_lcb->link_xmit_quota) )
             btu_start_timer (&p_lcb->timer_entry, BTU_TTYPE_L2CAP_LINK, L2CAP_LINK_FLOW_CONTROL_TOUT);
     }
@@ -1066,11 +1066,11 @@ BOOLEAN l2c_link_check_power_mode (tL2C_LCB *p_lcb)
         /*
         * We only switch park to active only if we have unsent packets
         */
-        if ( p_lcb->link_xmit_data_q.count == 0 )
+        if ( GKI_queue_is_empty(&p_lcb->link_xmit_data_q))
        {
             for (p_ccb = p_lcb->ccb_queue.p_first_ccb; p_ccb; p_ccb = p_ccb->p_next_ccb)
             {
-                if (p_ccb->xmit_hold_q.count != 0)
+                if (!GKI_queue_is_empty(&p_ccb->xmit_hold_q))
                 {
                     need_to_active = TRUE;
                     break;
@@ -1261,7 +1261,7 @@ void l2c_link_check_send_pkts (tL2C_LCB *p_lcb, tL2C_CCB *p_ccb, BT_HDR *p_buf)
         /* There is a special case where we have readjusted the link quotas and  */
         /* this link may have sent anything but some other link sent packets so  */
        /* so we may need a timer to kick off this link's transmissions.         */
-        if ( (p_lcb->link_xmit_data_q.count) && (p_lcb->sent_not_acked < p_lcb->link_xmit_quota) )
+        if ( (!GKI_queue_is_empty(&p_lcb->link_xmit_data_q)) && (p_lcb->sent_not_acked < p_lcb->link_xmit_quota) )
             btu_start_timer (&p_lcb->timer_entry, BTU_TTYPE_L2CAP_LINK, L2CAP_LINK_FLOW_CONTROL_TOUT);
     }
diff --git a/stack/l2cap/l2c_main.c b/stack/l2cap/l2c_main.c
index 325f7af8e..1b45713e0 100755
--- a/stack/l2cap/l2c_main.c
+++ b/stack/l2cap/l2c_main.c
@@ -152,11 +152,11 @@ void l2c_rcv_acl_data (BT_HDR *p_msg)
         {
             L2CAP_TRACE_WARNING ("L2CAP - holding ACL for unknown handle:%d ls:%d cid:%d opcode:%d cur count:%d",
                                   handle, p_msg->layer_specific, rcv_cid, cmd_code,
-                                  l2cb.rcv_hold_q.count);
+                                  GKI_queue_length(&l2cb.rcv_hold_q));
             p_msg->layer_specific = 2;
             GKI_enqueue (&l2cb.rcv_hold_q, p_msg);
 
-            if (l2cb.rcv_hold_q.count == 1)
+            if (GKI_queue_length(&l2cb.rcv_hold_q) == 1)
                 btu_start_timer (&l2cb.rcv_hold_tle, BTU_TTYPE_L2CAP_HOLD, BT_1SEC_TIMEOUT);
 
             return;
@@ -164,7 +164,7 @@ void l2c_rcv_acl_data (BT_HDR *p_msg)
         else
         {
             L2CAP_TRACE_ERROR ("L2CAP - rcvd ACL for unknown handle:%d ls:%d cid:%d opcode:%d cur count:%d",
-                                handle, p_msg->layer_specific, rcv_cid, cmd_code, l2cb.rcv_hold_q.count);
+                                handle, p_msg->layer_specific, rcv_cid, cmd_code, GKI_queue_length(&l2cb.rcv_hold_q));
         }
         GKI_freebuf (p_msg);
         return;
@@ -816,7 +816,7 @@ void l2c_process_held_packets (BOOLEAN timed_out)
     BT_HDR      *p_buf, *p_buf1;
     BUFFER_Q    *p_rcv_hold_q = &l2cb.rcv_hold_q;
 
-    if (!p_rcv_hold_q->count)
+    if (GKI_queue_is_empty(p_rcv_hold_q))
         return;
 
     if (!timed_out)
@@ -842,7 +842,7 @@ void l2c_process_held_packets (BOOLEAN timed_out)
     }
 
     /* If anyone still in the queue, restart the timeout */
-    if (p_rcv_hold_q->count)
+    if (!GKI_queue_is_empty(p_rcv_hold_q))
         btu_start_timer (&l2cb.rcv_hold_tle, BTU_TTYPE_L2CAP_HOLD, BT_1SEC_TIMEOUT);
 }
 
@@ -984,7 +984,7 @@ UINT8 l2c_data_write (UINT16 cid, BT_HDR *p_data, UINT16 flags)
     if (p_ccb->cong_sent)
     {
         L2CAP_TRACE_ERROR ("L2CAP - CID: 0x%04x cannot send, already congested  xmit_hold_q.count: %u  buff_quota: %u",
-                            p_ccb->local_cid, p_ccb->xmit_hold_q.count, p_ccb->buff_quota);
+                            p_ccb->local_cid, GKI_queue_length(&p_ccb->xmit_hold_q), p_ccb->buff_quota);
 
         GKI_freebuf (p_data);
         return (L2CAP_DW_FAILED);
diff --git a/stack/l2cap/l2c_utils.c b/stack/l2cap/l2c_utils.c
index 77849ac12..2d54ca502 100644
--- a/stack/l2cap/l2c_utils.c
+++ b/stack/l2cap/l2c_utils.c
@@ -210,7 +210,7 @@ void l2cu_release_lcb (tL2C_LCB *p_lcb)
         btm_acl_removed (p_lcb->remote_bd_addr, BT_TRANSPORT_BR_EDR);
 #endif
 
     /* Release any held buffers */
-    while (p_lcb->link_xmit_data_q.p_first)
+    while (!GKI_queue_is_empty(&p_lcb->link_xmit_data_q))
         GKI_freebuf (GKI_dequeue (&p_lcb->link_xmit_data_q));
 
 #if (L2CAP_UCD_INCLUDED == TRUE)
@@ -934,7 +934,7 @@ void l2cu_send_peer_disc_req (tL2C_CCB *p_ccb)
     */
     if (p_ccb->peer_cfg.fcr.mode == L2CAP_FCR_BASIC_MODE)
     {
-        while (p_ccb->xmit_hold_q.p_first)
+        while (GKI_getfirst(&p_ccb->xmit_hold_q))
         {
             p_buf2 = (BT_HDR *)GKI_dequeue (&p_ccb->xmit_hold_q);
             l2cu_set_acl_hci_header (p_buf2, p_ccb);
@@ -1685,7 +1685,7 @@ void l2cu_release_ccb (tL2C_CCB *p_ccb)
     /* Stop the timer */
     btu_stop_timer (&p_ccb->timer_entry);
 
-    while (p_ccb->xmit_hold_q.p_first)
+    while (!GKI_queue_is_empty(&p_ccb->xmit_hold_q))
         GKI_freebuf (GKI_dequeue (&p_ccb->xmit_hold_q));
 
     l2c_fcr_cleanup (p_ccb);
@@ -3108,7 +3108,7 @@ static tL2C_CCB *l2cu_get_next_channel_in_rr(tL2C_LCB *p_lcb)
         }
 
         L2CAP_TRACE_DEBUG("RR scan pri=%d, lcid=0x%04x, q_cout=%d",
-                p_ccb->ccb_priority, p_ccb->local_cid, p_ccb->xmit_hold_q.count );
+                p_ccb->ccb_priority, p_ccb->local_cid, GKI_queue_length(&p_ccb->xmit_hold_q));
 
         /* store the next serving channel */
         /* this channel is the last channel of its priority group */
@@ -3133,9 +3133,9 @@ static tL2C_CCB *l2cu_get_next_channel_in_rr(tL2C_LCB *p_lcb)
             if (p_ccb->fcrb.wait_ack || p_ccb->fcrb.remote_busy)
                 continue;
 
-            if ( p_ccb->fcrb.retrans_q.count == 0 )
+            if ( GKI_queue_is_empty(&p_ccb->fcrb.retrans_q))
             {
-                if ( p_ccb->xmit_hold_q.count == 0 )
+                if ( GKI_queue_is_empty(&p_ccb->xmit_hold_q))
                     continue;
 
                /* If using the common pool, should be at least 10% free. */
@@ -3149,7 +3149,7 @@ static tL2C_CCB *l2cu_get_next_channel_in_rr(tL2C_LCB *p_lcb)
            }
            else
            {
-                if (p_ccb->xmit_hold_q.count == 0)
+                if (GKI_queue_is_empty(&p_ccb->xmit_hold_q))
                     continue;
            }
@@ -3259,9 +3259,9 @@ BT_HDR *l2cu_get_next_buffer_to_send (tL2C_LCB *p_lcb)
                 continue;
 
             /* No more checks needed if sending from the reatransmit queue */
-            if (p_ccb->fcrb.retrans_q.count == 0)
+            if (GKI_queue_is_empty(&p_ccb->fcrb.retrans_q))
             {
-                if (p_ccb->xmit_hold_q.count == 0)
+                if (GKI_queue_is_empty(&p_ccb->xmit_hold_q))
                     continue;
 
                 /* If using the common pool, should be at least 10% free. */
@@ -3282,7 +3282,7 @@ BT_HDR *l2cu_get_next_buffer_to_send (tL2C_LCB *p_lcb)
         }
         else
         {
-            if (p_ccb->xmit_hold_q.count != 0)
+            if (!GKI_queue_is_empty(&p_ccb->xmit_hold_q))
             {
                 p_buf = (BT_HDR *)GKI_dequeue (&p_ccb->xmit_hold_q);
                 if(NULL == p_buf)
@@ -3408,7 +3408,7 @@ void l2cu_set_acl_hci_header (BT_HDR *p_buf, tL2C_CCB *p_ccb)
 *******************************************************************************/
 void l2cu_check_channel_congestion (tL2C_CCB *p_ccb)
 {
-    UINT16 q_count = p_ccb->xmit_hold_q.count;
+    UINT16 q_count = GKI_queue_length(&p_ccb->xmit_hold_q);
 
 #if (L2CAP_UCD_INCLUDED == TRUE)
     if ( p_ccb->local_cid == L2CAP_CONNECTIONLESS_CID )
diff --git a/stack/rfcomm/port_api.c b/stack/rfcomm/port_api.c
index 364308838..83c94da02 100644
--- a/stack/rfcomm/port_api.c
+++ b/stack/rfcomm/port_api.c
@@ -1132,7 +1132,7 @@ int PORT_Purge (UINT16 handle, UINT8 purge_flags)
     {
         PORT_SCHEDULE_LOCK;  /* to prevent missing credit */
 
-        count = p_port->rx.queue.count;
+        count = GKI_queue_length(&p_port->rx.queue);
 
         while ((p_buf = (BT_HDR *)GKI_dequeue (&p_port->rx.queue)) != NULL)
             GKI_freebuf (p_buf);
@@ -1368,7 +1368,7 @@ static int port_write (tPORT *p_port, BT_HDR *p_buf)
              (PORT_CTRL_REQ_SENT | PORT_CTRL_IND_RECEIVED)))
     {
         if ((p_port->tx.queue_size > PORT_TX_CRITICAL_WM)
-         || (p_port->tx.queue.count > PORT_TX_BUF_CRITICAL_WM))
+         || (GKI_queue_length(&p_port->tx.queue) > PORT_TX_BUF_CRITICAL_WM))
         {
             RFCOMM_TRACE_WARNING ("PORT_Write: Queue size: %d",
                                    p_port->tx.queue_size);
@@ -1526,7 +1526,7 @@ int PORT_WriteDataCO (UINT16 handle, int* p_len)
     /* data fits into the end of the queue */
     PORT_SCHEDULE_LOCK;
 
-    if (((p_buf = (BT_HDR *)p_port->tx.queue.p_last) != NULL)
+    if (((p_buf = (BT_HDR *)GKI_getlast(&p_port->tx.queue)) != NULL)
         && (((int)p_buf->len + available) <= (int)p_port->peer_mtu)
         && (((int)p_buf->len + available) <= (int)length))
     {
@@ -1560,12 +1560,12 @@ int PORT_WriteDataCO (UINT16 handle, int* p_len)
     {
         /* if we're over buffer high water mark, we're done */
         if ((p_port->tx.queue_size > PORT_TX_HIGH_WM)
-         || (p_port->tx.queue.count > PORT_TX_BUF_HIGH_WM))
+         || (GKI_queue_length(&p_port->tx.queue) > PORT_TX_BUF_HIGH_WM))
         {
             port_flow_control_user(p_port);
             event |= PORT_EV_FC;
             debug("tx queue is full,tx.queue_size:%d,tx.queue.count:%d,available:%d",
-                    p_port->tx.queue_size, p_port->tx.queue.count, available);
+                    p_port->tx.queue_size, GKI_queue_length(&p_port->tx.queue), available);
             break;
         }
@@ -1677,7 +1677,7 @@ int PORT_WriteData (UINT16 handle, char *p_data, UINT16 max_len, UINT16 *p_len)
     /* data fits into the end of the queue */
     PORT_SCHEDULE_LOCK;
 
-    if (((p_buf = (BT_HDR *)p_port->tx.queue.p_last) != NULL)
+    if (((p_buf = (BT_HDR *)GKI_getlast(&p_port->tx.queue)) != NULL)
      && ((p_buf->len + max_len) <= p_port->peer_mtu)
     && ((p_buf->len + max_len) <= length))
     {
@@ -1698,7 +1698,7 @@ int PORT_WriteData (UINT16 handle, char *p_data, UINT16 max_len, UINT16 *p_len)
     {
         /* if we're over buffer high water mark, we're done */
         if ((p_port->tx.queue_size > PORT_TX_HIGH_WM)
-         || (p_port->tx.queue.count > PORT_TX_BUF_HIGH_WM))
+         || (GKI_queue_length(&p_port->tx.queue) > PORT_TX_BUF_HIGH_WM))
             break;
 
         /* continue with rfcomm data write */
diff --git a/stack/rfcomm/port_rfc.c b/stack/rfcomm/port_rfc.c
index 5704c017c..3fa301380 100644
--- a/stack/rfcomm/port_rfc.c
+++ b/stack/rfcomm/port_rfc.c
@@ -865,7 +865,7 @@ void PORT_DataInd (tRFC_MCB *p_mcb, UINT8 dlci, BT_HDR *p_buf)
 
     /* Check if rx queue exceeds the limit */
     if ((p_port->rx.queue_size + p_buf->len > PORT_RX_CRITICAL_WM)
-     || (p_port->rx.queue.count + 1 > p_port->rx_buf_critical))
+     || (GKI_queue_length(&p_port->rx.queue) + 1 > p_port->rx_buf_critical))
     {
         RFCOMM_TRACE_EVENT ("PORT_DataInd. Buffer over run. Dropping the buffer");
         GKI_freebuf (p_buf);
diff --git a/stack/rfcomm/port_utils.c b/stack/rfcomm/port_utils.c
index 642ddb88f..7d29336dd 100644
--- a/stack/rfcomm/port_utils.c
+++ b/stack/rfcomm/port_utils.c
@@ -420,7 +420,7 @@ UINT32 port_flow_control_user (tPORT *p_port)
          || !p_port->rfc.p_mcb
          || !p_port->rfc.p_mcb->peer_ready
          || (p_port->tx.queue_size > PORT_TX_HIGH_WM)
-         || (p_port->tx.queue.count > PORT_TX_BUF_HIGH_WM);
+         || (GKI_queue_length(&p_port->tx.queue) > PORT_TX_BUF_HIGH_WM);
 
     if (p_port->tx.user_fc == fc)
         return (0);
@@ -536,7 +536,7 @@ void port_flow_control_peer(tPORT *p_port, BOOLEAN enable, UINT16 count)
             p_port->rx.peer_fc = TRUE;
         }
         /* if queue count reached credit rx max, set peer fc */
-        else if (p_port->rx.queue.count >= p_port->credit_rx_max)
+        else if (GKI_queue_length(&p_port->rx.queue) >= p_port->credit_rx_max)
         {
             p_port->rx.peer_fc = TRUE;
         }
@@ -552,7 +552,7 @@ void port_flow_control_peer(tPORT *p_port, BOOLEAN enable, UINT16 count)
         /* check if it can be resumed now */
         if (p_port->rx.peer_fc
         && (p_port->rx.queue_size < PORT_RX_LOW_WM)
-         && (p_port->rx.queue.count < PORT_RX_BUF_LOW_WM))
+         && (GKI_queue_length(&p_port->rx.queue) < PORT_RX_BUF_LOW_WM))
         {
             p_port->rx.peer_fc = FALSE;
 
@@ -573,7 +573,7 @@ void port_flow_control_peer(tPORT *p_port, BOOLEAN enable, UINT16 count)
         /* Check the size of the rx queue.  If it exceeds certain */
         /* level and flow control has not been sent to the peer do it now */
         else if ( ((p_port->rx.queue_size > PORT_RX_HIGH_WM)
-               || (p_port->rx.queue.count > PORT_RX_BUF_HIGH_WM))
+               || (GKI_queue_length(&p_port->rx.queue) > PORT_RX_BUF_HIGH_WM))
              && !p_port->rx.peer_fc)
         {
             RFCOMM_TRACE_EVENT ("PORT_DataInd Data reached HW. Sending FC set.");
diff --git a/stack/rfcomm/rfc_port_fsm.c b/stack/rfcomm/rfc_port_fsm.c
index a998b6ec9..c0d7fbc35 100644
--- a/stack/rfcomm/rfc_port_fsm.c
+++ b/stack/rfcomm/rfc_port_fsm.c
@@ -431,7 +431,7 @@ void rfc_port_sm_opened (tPORT *p_port, UINT16 event, void *p_data)
     case RFC_EVENT_DISC:
         p_port->rfc.state = RFC_STATE_CLOSED;
         rfc_send_ua (p_port->rfc.p_mcb, p_port->dlci);
-        if(p_port->rx.queue.count)
+        if(!GKI_queue_is_empty(&p_port->rx.queue))
        {
             /* give a chance to upper stack to close port properly */
             RFCOMM_TRACE_DEBUG("port queue is not empty");
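
Taken together, the call-site edits above follow one mechanical recipe: reads of .count become GKI_queue_length() or GKI_queue_is_empty(), and reads of .p_first/.p_last become GKI_getfirst()/GKI_getlast(). A sketch of what that migration looks like for a hypothetical caller; MY_CB, tx_q, drain_tx_queue, and TX_QUEUE_LIMIT are illustrative names, not identifiers from this tree, and the BUFFER_Q and GKI declarations are assumed to come from gki/common/gki.h:

    #include "gki.h"                 /* BUFFER_Q, GKI_queue_length, GKI_dequeue, ... */

    typedef struct { BUFFER_Q tx_q; } MY_CB;   /* illustrative control block */
    #define TX_QUEUE_LIMIT 10                  /* illustrative watermark */

    static void drain_tx_queue(MY_CB *p_cb)
    {
        /* Before this commit a caller could read the fields directly:
        **     if (p_cb->tx_q.count >= TX_QUEUE_LIMIT) ...
        **     while (p_cb->tx_q.p_first) ...
        ** The renamed _count/_p_first fields make that a compile error;
        ** the same logic now goes through the GKI queue API: */
        if (GKI_queue_length(&p_cb->tx_q) >= TX_QUEUE_LIMIT)
            GKI_freebuf(GKI_dequeue(&p_cb->tx_q));

        while (!GKI_queue_is_empty(&p_cb->tx_q))
            GKI_freebuf(GKI_dequeue(&p_cb->tx_q));
    }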