Skip to content
Snippets Groups Projects
Commit a26e75f5 authored by dmitriy.gerasimov's avatar dmitriy.gerasimov
Browse files

[+] Some MacOS specifics

[!] Fixed old bug in worker queue allocation
parent c593893e
No related branches found
No related tags found
No related merge requests found
Pipeline #7281 passed with stage
in 13 seconds
......@@ -24,7 +24,8 @@ darwin {
DEFINES += _GNU_SOURCE
include(src/darwin/darwin.pri)
DEFINES += DAP_OS_DARWIN DAP_OS_BSD
LIBS -= -lrt
LIBS += -lrt -ljson-c -lmagic
QMAKE_LIBDIR += /usr/local/lib
}
win32 {
......
......@@ -5,7 +5,9 @@ HEADERS += $$PWD/hash.h \
$$PWD/picnic_types.h \
$$PWD/platform.h
macos { HEADERS += $$PWD/macos_specific_endian.h }
macos {
HEADERS += $$PWD/macos_specific_endian.h
}
SOURCES += $$PWD/hash.c \
$$PWD/lowmc_constants.c \
......
......@@ -335,7 +335,10 @@ int dap_events_start( dap_events_t *a_events )
// Link queues between
for( uint32_t i = 0; i < s_threads_count; i++) {
dap_worker_t * l_worker = s_workers[i];
l_worker->queue_es_io_input = DAP_NEW_S_SIZE(dap_events_socket_t*, sizeof (dap_events_socket_t*)* s_threads_count);
l_worker->queue_es_new_input = DAP_NEW_Z_SIZE(dap_events_socket_t*, sizeof (dap_events_socket_t*)* s_threads_count);
l_worker->queue_es_delete_input = DAP_NEW_Z_SIZE(dap_events_socket_t*, sizeof (dap_events_socket_t*)* s_threads_count);
l_worker->queue_es_reassign_input = DAP_NEW_Z_SIZE(dap_events_socket_t*, sizeof (dap_events_socket_t*)* s_threads_count);
l_worker->queue_es_io_input = DAP_NEW_Z_SIZE(dap_events_socket_t*, sizeof (dap_events_socket_t*)* s_threads_count);
for( uint32_t n = 0; n < s_threads_count; n++) {
l_worker->queue_es_new_input[n] = dap_events_socket_queue_ptr_create_input(s_workers[n]->queue_es_new);
l_worker->queue_es_delete_input[n] = dap_events_socket_queue_ptr_create_input(s_workers[n]->queue_es_delete);
......
......@@ -219,9 +219,10 @@ void *dap_worker_thread(void *arg)
l_cur = l_worker->poll_esocket[n];
//log_it(L_DEBUG, "flags: returned events 0x%0X requested events 0x%0X",l_worker->poll[n].revents,l_worker->poll[n].events );
#elif defined (DAP_EVENTS_CAPS_KQUEUE)
struct kevent * l_kevent_selected = l_cur->kqueue_event_catched = &l_worker->kqueue_events[n];
struct kevent * l_kevent_selected = &l_worker->kqueue_events[n];
l_cur = (dap_events_socket_t*) l_kevent_selected->udata;
assert(l_cur);
l_cur->kqueue_event_catched = l_kevent_selected;
#ifndef DAP_OS_DARWIN
u_int l_cur_flags = l_kevent_selected->flags;
#else
......@@ -316,7 +317,8 @@ void *dap_worker_thread(void *arg)
dap_events_socket_set_writable_unsafe(l_cur, false);
l_cur->buf_out_size = 0;
l_cur->flags |= DAP_SOCK_SIGNAL_CLOSE;
l_cur->callbacks.error_callback(l_cur, l_sock_err); // Call callback to process error event
if(l_cur->callbacks.error_callback)
l_cur->callbacks.error_callback(l_cur, l_sock_err); // Call callback to process error event
}
/*if (l_flag_hup) {
......
......@@ -12,6 +12,7 @@ INCLUDEPATH += \
$$PWD/../net/server/http_server/http_client/include \
$$PWD/../net/server/http_server/include \
$$PWD/../net/server/enc_server/include \
$$PWD/../net/server/notify_server/include \
$$PWD/../net/server/json_rpc/include \
$$PWD/../net/server/http_server \
$$PWD/../net/stream/session/include \
......
......@@ -12,14 +12,21 @@ SOURCES += $$PWD/enc_server/dap_enc_http.c \
#http_server
HEADERS += $$PWD/http_server/include/dap_http.h \
$$PWD/http_server/include/dap_http_cache.h \
$$PWD/http_server/include/dap_http_folder.h \
$$PWD/http_server/include/http_status_code.h \
$$PWD/http_server/include/dap_http_simple.h
SOURCES += $$PWD/http_server/dap_http.c \
$$PWD/http_server/dap_http_cache.c \
$$PWD/http_server/dap_http_folder.c \
$$PWD/http_server/dap_http_simple.c
# notify server
#notify_server
HEADERS += $$PWD/notify_server/include/dap_notify_srv.h
SOURCES += $$PWD/notify_server/src/dap_notify_srv.c
include (../server/http_server/http_client/http.pri)
......@@ -43,4 +50,6 @@ SOURCES += $$PWD/json_rpc/src/dap_json_rpc.c \
$$PWD/json_rpc/src/dap_json_rpc_response_handler.c
INCLUDEPATH += $$PWD/include
darwin {
LIBS += -ljson-c -lmagic
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment