Skip to content
Snippets Groups Projects
Commit d62ce57f authored by dmitriy.gerasimov's avatar dmitriy.gerasimov
Browse files

Merge branch 'hotfix-4787' into 'master'

hotfix-4787

See merge request !328
parents accde308 f732f3d3
No related branches found
No related tags found
2 merge requests!330features-4787,!328hotfix-4787
Pipeline #8035 passed with stage
in 3 seconds
......@@ -856,30 +856,30 @@ void *dap_worker_thread(void *arg)
}
#ifdef DAP_EVENTS_CAPS_POLL
/***********************************************************/
/* If the compress_array flag was turned on, we need */
/* to squeeze together the array and decrement the number */
/* of file descriptors. We do not need to move back the */
/* events and revents fields because the events will always*/
/* be POLLIN in this case, and revents is output. */
/***********************************************************/
if ( l_worker->poll_compress){
l_worker->poll_compress = false;
for (size_t i = 0; i < l_worker->poll_count ; i++) {
if ( l_worker->poll[i].fd == -1){
if( l_worker->poll_count){
for(size_t j = i; j < l_worker->poll_count-1; j++){
l_worker->poll[j].fd = l_worker->poll[j+1].fd;
l_worker->poll_esocket[j] = l_worker->poll_esocket[j+1];
if(l_worker->poll_esocket[j])
l_worker->poll_esocket[j]->poll_index = j;
}
}
i--;
l_worker->poll_count--;
}
}
}
/***********************************************************/
/* If the compress_array flag was turned on, we need */
/* to squeeze together the array and decrement the number */
/* of file descriptors. */
/***********************************************************/
if ( l_worker->poll_compress){
l_worker->poll_compress = false;
for (size_t i = 0; i < l_worker->poll_count ; i++) {
if ( l_worker->poll[i].fd == -1){
if( l_worker->poll_count){
for(size_t j = i; j < l_worker->poll_count-1; j++){
l_worker->poll[j].fd = l_worker->poll[j+1].fd;
l_worker->poll[j].events = l_worker->poll[j+1].events;
l_worker->poll[j].revents = l_worker->poll[j+1].revents;
l_worker->poll_esocket[j] = l_worker->poll_esocket[j+1];
if(l_worker->poll_esocket[j])
l_worker->poll_esocket[j]->poll_index = j;
}
}
i--;
l_worker->poll_count--;
}
}
}
#endif
} // while
log_it(L_NOTICE,"Exiting thread #%u", l_worker->id);
......
......@@ -228,8 +228,8 @@ static bool s_timer_update_states_callback(void * a_arg )
l_chain_id.uint64, l_net->pub.cell_id.uint64,
&l_sync_gdb, sizeof(l_sync_gdb));
}
return true;
}
return true;
}
}
}
......@@ -832,12 +832,14 @@ int dap_chain_node_client_set_callbacks(dap_client_t *a_client, uint8_t a_ch_id)
l_ch_chain->callback_notify_packet_out = s_ch_chain_callback_notify_packet_out;
l_ch_chain->callback_notify_packet_in = s_ch_chain_callback_notify_packet_in;
l_ch_chain->callback_notify_arg = l_node_client;
memcpy(&l_node_client->ch_chain_uuid, &l_ch->uuid, sizeof(dap_stream_ch_uuid_t));
}
// N
if(a_ch_id == dap_stream_ch_chain_net_get_id()) {
dap_stream_ch_chain_net_t *l_ch_chain = DAP_STREAM_CH_CHAIN_NET(l_ch);
l_ch_chain->notify_callback = s_ch_chain_callback_notify_packet_in2;
l_ch_chain->notify_callback_arg = l_node_client;
memcpy(&l_node_client->ch_chain_net_uuid, &l_ch->uuid, sizeof(dap_stream_ch_uuid_t));
}
// R
if(a_ch_id == dap_stream_ch_chain_net_srv_get_id()) {
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment