diff --git a/core/null.c b/core/null.c
index c1b5705..8fd63c8 100644
--- a/core/null.c
+++ b/core/null.c
@@ -19,6 +19,7 @@
 #include "libp2p/routing/dht_protocol.h"
 #include "ipfs/merkledag/merkledag.h"
 #include "ipfs/merkledag/node.h"
+#include "ipfs/util/thread_pool.h"
 
 #define BUF_SIZE 4096
 
@@ -41,7 +42,7 @@ int protocol_compare(unsigned char* incoming, size_t incoming_size, const char*
 /**
  * We've received a connection. Find out what they want
  */
-void *ipfs_null_connection (void *ptr)
+void ipfs_null_connection (void *ptr)
 {
 	struct null_connection_params *connection_param = NULL;
 
@@ -62,10 +63,16 @@ void *ipfs_null_connection (void *ptr)
 		// check if they're looking for an upgrade (i.e. secio)
 		unsigned char* results = NULL;
 		size_t bytes_read = 0;
-		if (!session.default_stream->read(&session, &results, &bytes_read) ) {
+		if (!session.default_stream->read(&session, &results, &bytes_read, 5) ) {
 			libp2p_logger_debug("null", "stream transaction read returned false\n");
 			break;
 		}
+		if (null_shutting_down) {
+			break;
+		}
+		if (bytes_read == 0) { // timeout
+			continue;
+		}
 		libp2p_logger_debug("null", "Read %lu bytes from a stream tranaction\n", bytes_read);
 		if (protocol_compare(results, bytes_read, "/secio")) {
 			libp2p_logger_debug("null", "Attempting secure io connection...\n");
@@ -86,7 +93,7 @@ void *ipfs_null_connection (void *ptr)
 			while(_continue) {
 				unsigned char* hash;
 				size_t hash_length = 0;
-				_continue = session.default_stream->read(&session, &hash, &hash_length);
+				_continue = session.default_stream->read(&session, &hash, &hash_length, 5);
 				if (hash_length < 20) {
 					_continue = 0;
 					continue;
@@ -120,9 +127,8 @@ void *ipfs_null_connection (void *ptr)
 			libp2p_logger_log("null", LOGLEVEL_DEBUG, "kademlia message handled\n");
 		}
 		else {
-			libp2p_logger_error("null", "There was a problem with this connection. It is nothing I can handle. Looping to try again.\n");
-			// oops there was a problem
-			//TODO: Handle this
+			libp2p_logger_error("null", "There was a problem with this connection. It is nothing I can handle. Disconnecting.\n");
+			break;
 		}
 		free(results);
 	}
@@ -137,14 +143,16 @@ void *ipfs_null_connection (void *ptr)
 		libp2p_net_multistream_stream_free(session.insecure_stream);
 	}
 	(*(connection_param->count))--; // update counter.
+	if (connection_param->ip != NULL)
+		free(connection_param->ip);
 	free (connection_param);
-	return (void*) 1;
+	return;
 }
 
 void *ipfs_null_listen (void *ptr)
 {
 	int socketfd, s, count = 0;
-	pthread_t pth_connection;
+	threadpool thpool = thpool_init(25);
 
 	struct IpfsNodeListenParams *listen_param;
 	struct null_connection_params *connection_param;
@@ -183,16 +191,13 @@ void *ipfs_null_listen (void *ptr)
 					connection_param->port = 0;
 				}
 				// Create pthread for ipfs_null_connection.
-				if (pthread_create(&pth_connection, NULL, ipfs_null_connection, connection_param)) {
-					libp2p_logger_log("null", LOGLEVEL_DEBUG, "Error creating thread for connection %d\n", count);
-					close (s);
-				} else {
-					pthread_detach (pth_connection);
-				}
+				thpool_add_work(thpool, ipfs_null_connection, connection_param);
 			}
 		}
 	}
 
+	thpool_destroy(thpool);
+	return (void*) 2;
 }
 
diff --git a/importer/resolver.c b/importer/resolver.c
index 6bf9c0d..6d18ac0 100644
--- a/importer/resolver.c
+++ b/importer/resolver.c
@@ -154,7 +154,7 @@ struct Node* ipfs_resolver_remote_get(const char* path, struct Node* from, const
 	unsigned char* response;
 	size_t response_size;
 	// we should get back a protobuf'd record
-	stream->read(&session_context, &response, &response_size);
+	stream->read(&session_context, &response, &response_size, 5);
 	if (response_size == 1)
 		return NULL;
 	// turn the protobuf into a Node
diff --git a/include/ipfs/core/daemon.h b/include/ipfs/core/daemon.h
index 9aef6b2..23ac421 100644
--- a/include/ipfs/core/daemon.h
+++ b/include/ipfs/core/daemon.h
@@ -26,7 +26,7 @@ struct IpfsNodeListenParams {
 	struct IpfsNode* local_node;
 };
 
-void *ipfs_null_connection (void *ptr);
+//void *ipfs_null_connection (void *ptr);
 void *ipfs_null_listen (void *ptr);
 int ipfs_null_shutdown();
 int ipfs_daemon (int argc, char **argv);
diff --git a/include/ipfs/util/thread_pool.h b/include/ipfs/util/thread_pool.h
new file mode 100644
index 0000000..04d5af2
--- /dev/null
+++ b/include/ipfs/util/thread_pool.h
@@ -0,0 +1,187 @@
+/**********************************
+ * @author Johan Hanssen Seferidis
+ * License:      MIT
+ *
+ **********************************/
+
+#ifndef _THPOOL_
+#define _THPOOL_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* =================================== API ======================================= */
+
+
+typedef struct thpool_* threadpool;
+
+
+/**
+ * @brief  Initialize threadpool
+ *
+ * Initializes a threadpool. This function will not return until all
+ * threads have initialized successfully.
+ *
+ * @example
+ *
+ *    ..
+ *    threadpool thpool;          //First we declare a threadpool
+ *    thpool = thpool_init(4);    //then we initialize it to 4 threads
+ *    ..
+ *
+ * @param  num_threads   number of threads to be created in the threadpool
+ * @return threadpool    created threadpool on success,
+ *                       NULL on error
+ */
+threadpool thpool_init(int num_threads);
+
+
+/**
+ * @brief Add work to the job queue
+ *
+ * Takes an action and its argument and adds it to the threadpool's job queue.
+ * If you want to add work that takes more than one argument, one way to
+ * implement this is by passing a pointer to a structure.
+ *
+ * NOTICE: You have to cast both the function and argument to avoid warnings.
+ *
+ * @example
+ *
+ *    void print_num(int num){
+ *       printf("%d\n", num);
+ *    }
+ *
+ *    int main() {
+ *       ..
+ *       int a = 10;
+ *       thpool_add_work(thpool, (void*)print_num, (void*)a);
+ *       ..
+ *    }
+ *
+ * @param  threadpool    threadpool to which the work will be added
+ * @param  function_p    pointer to function to add as work
+ * @param  arg_p         pointer to an argument
+ * @return 0 on success, -1 otherwise.
+ */
+int thpool_add_work(threadpool, void (*function_p)(void*), void* arg_p);
+
+
+/**
+ * @brief Wait for all queued jobs to finish
+ *
+ * Will wait for all jobs - both queued and currently running to finish.
+ * Once the queue is empty and all work has completed, the calling thread
+ * (probably the main program) will continue.
+ *
+ * Smart polling is used in wait.
+ * The polling is initially 0 - meaning that there is virtually no polling at
+ * all. If after 1 second the threads haven't finished, the polling interval
+ * starts growing exponentially until it reaches max_secs seconds. Then it
+ * jumps down to a maximum polling interval, assuming that heavy processing is
+ * being done in the threadpool.
+ *
+ * @example
+ *
+ *    ..
+ *    threadpool thpool = thpool_init(4);
+ *    ..
+ *    // Add a bunch of work
+ *    ..
+ *    thpool_wait(thpool);
+ *    puts("All added work has finished");
+ *    ..
+ *
+ * @param threadpool     the threadpool to wait for
+ * @return nothing
+ */
+void thpool_wait(threadpool);
+
+
+/**
+ * @brief Pauses all threads immediately
+ *
+ * The threads will be paused no matter if they are idle or working.
+ * The threads return to their previous states once thpool_resume
+ * is called.
+ *
+ * While the threads are paused, new work can still be added.
+ *
+ * @example
+ *
+ *    threadpool thpool = thpool_init(4);
+ *    thpool_pause(thpool);
+ *    ..
+ *    // Add a bunch of work
+ *    ..
+ *    thpool_resume(thpool); // Let the threads start their magic
+ *
+ * @param threadpool    the threadpool where the threads should be paused
+ * @return nothing
+ */
+void thpool_pause(threadpool);
+
+
+/**
+ * @brief Unpauses all threads if they are paused
+ *
+ * @example
+ *    ..
+ *    thpool_pause(thpool);
+ *    sleep(10);              // Delay execution 10 seconds
+ *    thpool_resume(thpool);
+ *    ..
+ *
+ * @param threadpool     the threadpool where the threads should be unpaused
+ * @return nothing
+ */
+void thpool_resume(threadpool);
+
+
+/**
+ * @brief Destroy the threadpool
+ *
+ * This will wait for the currently active threads to finish and then 'kill'
+ * the whole threadpool to free up memory.
+ *
+ * @example
+ *    int main() {
+ *       threadpool thpool1 = thpool_init(2);
+ *       threadpool thpool2 = thpool_init(2);
+ *       ..
+ *       thpool_destroy(thpool1);
+ *       ..
+ *       return 0;
+ *    }
+ *
+ * @param threadpool     the threadpool to destroy
+ * @return nothing
+ */
+void thpool_destroy(threadpool);
+
+
+/**
+ * @brief Show currently working threads
+ *
+ * Working threads are the threads that are performing work (not idle).
+ *
+ * @example
+ *    int main() {
+ *       threadpool thpool1 = thpool_init(2);
+ *       threadpool thpool2 = thpool_init(2);
+ *       ..
+ *       printf("Working threads: %d\n", thpool_num_threads_working(thpool1));
+ *       ..
+ *       return 0;
+ *    }
+ *
+ * @param threadpool     the threadpool of interest
+ * @return integer       number of threads working
+ */
+int thpool_num_threads_working(threadpool);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/main/Makefile b/main/Makefile
index 4282ac7..3a666aa 100644
--- a/main/Makefile
+++ b/main/Makefile
@@ -28,7 +28,8 @@ OBJS = main.o \
 	../unixfs/unixfs.o \
 	../../c-protobuf/protobuf.o ../../c-protobuf/varint.o \
 	../util/errs.o \
-	../util/time.o
+	../util/time.o \
+	../util/thread_pool.o
 
 %.o: %.c $(DEPS)
 	$(CC) -c -o $@ $< $(CFLAGS)
diff --git a/routing/online.c b/routing/online.c
index d54d347..3e04d57 100644
--- a/routing/online.c
+++ b/routing/online.c
@@ -37,7 +37,7 @@ struct Libp2pMessage* ipfs_routing_online_send_receive_message(struct Stream* st
 
 	// send the message, and expect the same back
 	session_context.default_stream->write(&session_context, protobuf, protobuf_size);
-	session_context.default_stream->read(&session_context, &results, &results_size);
+	session_context.default_stream->read(&session_context, &results, &results_size, 5);
 
 	// see if we can unprotobuf
 	if (!libp2p_message_protobuf_decode(results, results_size, &return_message))
@@ -190,7 +190,8 @@ int ipfs_routing_online_find_peer(struct IpfsRouting* routing, const char* peer_
 int ipfs_routing_online_provide(struct IpfsRouting* routing, char* key, size_t key_size) {
 	struct Libp2pPeer* local_peer = libp2p_peer_new();
 	local_peer->id_size = strlen(routing->local_node->identity->peer_id);
-	local_peer->id = routing->local_node->identity->peer_id;
+	local_peer->id = malloc(local_peer->id_size);
+	memcpy(local_peer->id, routing->local_node->identity->peer_id, local_peer->id_size);
 	local_peer->connection_type = CONNECTION_TYPE_CONNECTED;
 	local_peer->addr_head = libp2p_utils_linked_list_new();
 	struct MultiAddress* temp_ma = multiaddress_new_from_string((char*)routing->local_node->repo->config->addresses->swarm_head->item);
diff --git a/test/Makefile b/test/Makefile
index 168350e..66f2513 100644
--- a/test/Makefile
+++ b/test/Makefile
@@ -28,6 +28,7 @@ OBJS = testit.o test_helper.o \
 	../routing/supernode.o \
 	../thirdparty/ipfsaddr/ipfs_addr.o \
 	../unixfs/unixfs.o \
+	../util/thread_pool.o \
 	../../c-protobuf/protobuf.o ../../c-protobuf/varint.o
 
 %.o: %.c $(DEPS)
diff --git a/test/routing/test_routing.h b/test/routing/test_routing.h
index 18b84ae..8cb75f8 100644
--- a/test/routing/test_routing.h
+++ b/test/routing/test_routing.h
@@ -233,6 +233,90 @@ int test_routing_find_providers() {
 	if (local_node.routing != NULL) {
 		ipfs_routing_online_free(local_node.routing);
 	}
+	if (node != NULL)
+		ipfs_node_free(node);
+	if (result != NULL) {
+		// we have a vector of peers. Clean 'em up:
+		for(int i = 0; i < result->total; i++) {
+			struct Libp2pPeer* p = (struct Libp2pPeer*)libp2p_utils_vector_get(result, i);
+			libp2p_peer_free(p);
+		}
+		libp2p_utils_vector_free(result);
+	}
+	libp2p_logger_free();
+	return retVal;
+
+}
+
+/***
+ * Fire up a "client" and "server" and let the client tell the server it's providing a file
+ */
+int test_routing_provide() {
+	int retVal = 0;
+	// clean out repository
+	char* ipfs_path = "/tmp/test1";
+	os_utils_setenv("IPFS_PATH", ipfs_path, 1);
+	char* peer_id_1 = NULL;
+	char* peer_id_2 = NULL;
+	struct FSRepo* fs_repo_2 = NULL;
+	pthread_t thread1, thread2;
+	int thread1_started = 0, thread2_started = 0;
+	struct MultiAddress* ma_peer1 = NULL;
+	struct Libp2pVector* ma_vector2 = NULL;
+
+	// create peer 1
+	drop_and_build_repository(ipfs_path, 4001, NULL, &peer_id_1);
+	char multiaddress_string[255];
+	sprintf(multiaddress_string, "/ip4/127.0.0.1/tcp/4001/ipfs/%s", peer_id_1);
+	ma_peer1 = multiaddress_new_from_string(multiaddress_string);
+	// start the daemon in a separate thread
+	if (pthread_create(&thread1, NULL, test_routing_daemon_start, (void*)ipfs_path) < 0) {
+		fprintf(stderr, "Unable to start thread 1\n");
+		goto exit;
+	}
+	thread1_started = 1;
+
+	// create peer 2
+	ipfs_path = "/tmp/test2";
+	os_utils_setenv("IPFS_PATH", ipfs_path, 1);
+	// create a vector to hold peer1's multiaddress so we can connect as a peer
+	ma_vector2 = libp2p_utils_vector_new(1);
+	libp2p_utils_vector_add(ma_vector2, ma_peer1);
+	// note: this destroys some things, as it frees the fs_repo:
+	drop_and_build_repository(ipfs_path, 4002, ma_vector2, &peer_id_2);
+	// add a file, to prime the connection to peer 1
+	//TODO: Find a better way to do this...
+	size_t bytes_written = 0;
+	ipfs_repo_fsrepo_new(ipfs_path, NULL, &fs_repo_2);
+	ipfs_repo_fsrepo_open(fs_repo_2);
+	struct Node* node = NULL;
+	ipfs_import_file(NULL, "/home/parallels/ipfstest/hello_world.txt", &node, fs_repo_2, &bytes_written, 0);
+	ipfs_repo_fsrepo_free(fs_repo_2);
+	// start the daemon in a separate thread
+	if (pthread_create(&thread2, NULL, test_routing_daemon_start, (void*)ipfs_path) < 0) {
+		fprintf(stderr, "Unable to start thread 2\n");
+		goto exit;
+	}
+	thread2_started = 1;
+
+	// wait for everything to start up
+	// JMJ debugging
+	sleep(3);
+
+	retVal = 1;
+	exit:
+	ipfs_daemon_stop();
+	if (peer_id_1 != NULL)
+		free(peer_id_1);
+	if (peer_id_2 != NULL)
+		free(peer_id_2);
+	if (thread1_started)
+		pthread_join(thread1, NULL);
+	if (thread2_started)
+		pthread_join(thread2, NULL);
+	if (ma_vector2 != NULL) {
+		libp2p_utils_vector_free(ma_vector2);
+	}
 	if (node != NULL)
 		ipfs_node_free(node);
 	libp2p_logger_free();
diff --git a/test/testit.c b/test/testit.c
index 3012f71..6d5268f 100644
--- a/test/testit.c
+++ b/test/testit.c
@@ -64,6 +64,7 @@ const char* names[] = {
 	"test_resolver_get",
 	"test_routing_find_peer",
 	"test_routing_find_providers",
+	"test_routing_provide",
 	"test_routing_supernode_get_value",
 	"test_routing_supernode_get_remote_value",
 	"test_unixfs_encode_decode",
@@ -109,6 +110,7 @@ int (*funcs[])(void) = {
 	test_resolver_get,
 	test_routing_find_peer,
 	test_routing_find_providers,
+	test_routing_provide,
 	test_routing_supernode_get_value,
 	test_routing_supernode_get_remote_value,
 	test_unixfs_encode_decode,
diff --git a/util/Makefile b/util/Makefile
index 559e417..380d5b5 100644
--- a/util/Makefile
+++ b/util/Makefile
@@ -7,7 +7,7 @@ endif
 LFLAGS =
 DEPS =
 
-OBJS = errs.o time.o
+OBJS = errs.o time.o thread_pool.o
 
 %.o: %.c $(DEPS)
 	$(CC) -c -o $@ $< $(CFLAGS)
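
The core/null.c change above replaces the per-connection pthread_create/pthread_detach pair with a fixed pool of 25 workers. Below is a minimal, self-contained sketch of how the thread_pool.h API added in this patch is meant to be driven; conn_job and handle_connection are hypothetical stand-ins for null_connection_params and ipfs_null_connection, not code from this repository.

	#include <stdio.h>
	#include <stdlib.h>
	#include "ipfs/util/thread_pool.h"

	/* Hypothetical payload, standing in for struct null_connection_params. */
	struct conn_job {
		int socket_fd;
	};

	/* Jobs must match void (*)(void*), which is why this patch changes
	 * ipfs_null_connection from returning void* to void. */
	static void handle_connection(void* arg) {
		struct conn_job* job = (struct conn_job*)arg;
		printf("handling socket %d\n", job->socket_fd);
		free(job); /* the job owns its argument, as with connection_param */
	}

	int main() {
		threadpool pool = thpool_init(4); /* ipfs_null_listen uses 25 */
		if (pool == NULL)
			return 1;
		struct conn_job* job = malloc(sizeof(struct conn_job));
		job->socket_fd = 42; /* illustrative value */
		thpool_add_work(pool, handle_connection, job); /* queued; an idle worker runs it */
		thpool_wait(pool);    /* block until the queue drains and workers go idle */
		thpool_destroy(pool); /* reap workers and free the pool */
		return 0;
	}

Unlike pthread_create, thpool_add_work does not fail under load (short of malloc failure); excess connections simply wait in the job queue until one of the workers frees up.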
diff --git a/util/thread_pool.c b/util/thread_pool.c
new file mode 100644
index 0000000..c7a3624
--- /dev/null
+++ b/util/thread_pool.c
@@ -0,0 +1,551 @@
+/* ********************************
+ * Author:       Johan Hanssen Seferidis
+ * License:      MIT
+ * Description:  Library providing a threading pool where you can add
+ *               work. For usage, check the thpool.h file or README.md
+ *
+ *//** @file thpool.h *//*
+ *
+ ********************************/
+
+#define _POSIX_C_SOURCE 200809L
+#include <unistd.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <errno.h>
+#include <time.h>
+#if defined(__linux__)
+#include <sys/prctl.h>
+#endif
+
+#include "ipfs/util/thread_pool.h"
+
+#ifdef THPOOL_DEBUG
+#define THPOOL_DEBUG 1
+#else
+#define THPOOL_DEBUG 0
+#endif
+
+#if !defined(DISABLE_PRINT) || defined(THPOOL_DEBUG)
+#define err(str) fprintf(stderr, str)
+#else
+#define err(str)
+#endif
+
+static volatile int threads_keepalive;
+static volatile int threads_on_hold;
+
+
+
+/* ========================== STRUCTURES ============================ */
+
+
+/* Binary semaphore */
+typedef struct bsem {
+	pthread_mutex_t mutex;
+	pthread_cond_t cond;
+	int v;
+} bsem;
+
+
+/* Job */
+typedef struct job{
+	struct job* prev;                    /* pointer to previous job   */
+	void (*function)(void* arg);         /* function pointer          */
+	void* arg;                           /* function's argument       */
+} job;
+
+
+/* Job queue */
+typedef struct jobqueue{
+	pthread_mutex_t rwmutex;             /* used for queue r/w access */
+	job *front;                          /* pointer to front of queue */
+	job *rear;                           /* pointer to rear of queue  */
+	bsem *has_jobs;                      /* flag as binary semaphore  */
+	int len;                             /* number of jobs in queue   */
+} jobqueue;
+
+
+/* Thread */
+typedef struct thread{
+	int id;                              /* friendly id               */
+	pthread_t pthread;                   /* pointer to actual thread  */
+	struct thpool_* thpool_p;            /* access to thpool          */
+} thread;
+
+
+/* Threadpool */
+typedef struct thpool_{
+	thread** threads;                    /* pointer to threads        */
+	volatile int num_threads_alive;      /* threads currently alive   */
+	volatile int num_threads_working;    /* threads currently working */
+	pthread_mutex_t thcount_lock;        /* used for thread count etc */
+	pthread_cond_t threads_all_idle;     /* signal to thpool_wait     */
+	jobqueue jobqueue;                   /* job queue                 */
+} thpool_;
+
+
+
+
+
+/* ========================== PROTOTYPES ============================ */
+
+
+static int thread_init(thpool_* thpool_p, struct thread** thread_p, int id);
+static void* thread_do(struct thread* thread_p);
+static void thread_hold(int sig_id);
+static void thread_destroy(struct thread* thread_p);
+
+static int jobqueue_init(jobqueue* jobqueue_p);
+static void jobqueue_clear(jobqueue* jobqueue_p);
+static void jobqueue_push(jobqueue* jobqueue_p, struct job* newjob_p);
+static struct job* jobqueue_pull(jobqueue* jobqueue_p);
+static void jobqueue_destroy(jobqueue* jobqueue_p);
+
+static void bsem_init(struct bsem *bsem_p, int value);
+static void bsem_reset(struct bsem *bsem_p);
+static void bsem_post(struct bsem *bsem_p);
+static void bsem_post_all(struct bsem *bsem_p);
+static void bsem_wait(struct bsem *bsem_p);
+
+
+
+
+
+/* ========================== THREADPOOL ============================ */
+
+
+/* Initialise thread pool */
+struct thpool_* thpool_init(int num_threads){
+
+	threads_on_hold = 0;
+	threads_keepalive = 1;
+
+	if (num_threads < 0){
+		num_threads = 0;
+	}
+
+	/* Make new thread pool */
+	thpool_* thpool_p;
+	thpool_p = (struct thpool_*)malloc(sizeof(struct thpool_));
+	if (thpool_p == NULL){
+		err("thpool_init(): Could not allocate memory for thread pool\n");
+		return NULL;
+	}
+	thpool_p->num_threads_alive = 0;
+	thpool_p->num_threads_working = 0;
+
+	/* Initialise the job queue */
+	if (jobqueue_init(&thpool_p->jobqueue) == -1){
+		err("thpool_init(): Could not allocate memory for job queue\n");
+		free(thpool_p);
+		return NULL;
+	}
+
+	/* Make threads in pool */
+	thpool_p->threads = (struct thread**)malloc(num_threads * sizeof(struct thread *));
+	if (thpool_p->threads == NULL){
+		err("thpool_init(): Could not allocate memory for threads\n");
+		jobqueue_destroy(&thpool_p->jobqueue);
+		free(thpool_p);
+		return NULL;
+	}
+
+	pthread_mutex_init(&(thpool_p->thcount_lock), NULL);
+	pthread_cond_init(&thpool_p->threads_all_idle, NULL);
+
+	/* Thread init */
+	int n;
+	for (n=0; n<num_threads; n++){
+		thread_init(thpool_p, &thpool_p->threads[n], n);
+#if THPOOL_DEBUG
+		printf("THPOOL_DEBUG: Created thread %d in pool \n", n);
+#endif
+	}
+
+	/* Wait for threads to initialize */
+	while (thpool_p->num_threads_alive != num_threads) {}
+
+	return thpool_p;
+}
+
+
+/* Add work to the thread pool */
+int thpool_add_work(thpool_* thpool_p, void (*function_p)(void*), void* arg_p){
+	job* newjob;
+
+	newjob=(struct job*)malloc(sizeof(struct job));
+	if (newjob==NULL){
+		err("thpool_add_work(): Could not allocate memory for new job\n");
+		return -1;
+	}
+
+	/* add function and argument */
+	newjob->function=function_p;
+	newjob->arg=arg_p;
+
+	/* add job to queue */
+	jobqueue_push(&thpool_p->jobqueue, newjob);
+
+	return 0;
+}
+
+
+/* Wait until all jobs have finished */
+void thpool_wait(thpool_* thpool_p){
+	pthread_mutex_lock(&thpool_p->thcount_lock);
+	while (thpool_p->jobqueue.len || thpool_p->num_threads_working) {
+		pthread_cond_wait(&thpool_p->threads_all_idle, &thpool_p->thcount_lock);
+	}
+	pthread_mutex_unlock(&thpool_p->thcount_lock);
+}
+
+
+/* Destroy the threadpool */
+void thpool_destroy(thpool_* thpool_p){
+	/* No need to destroy if it's NULL */
+	if (thpool_p == NULL) return ;
+
+	volatile int threads_total = thpool_p->num_threads_alive;
+
+	/* End each thread's infinite loop */
+	threads_keepalive = 0;
+
+	/* Give one second to kill idle threads */
+	double TIMEOUT = 1.0;
+	time_t start, end;
+	double tpassed = 0.0;
+	time (&start);
+	while (tpassed < TIMEOUT && thpool_p->num_threads_alive){
+		bsem_post_all(thpool_p->jobqueue.has_jobs);
+		time (&end);
+		tpassed = difftime(end,start);
+	}
+
+	/* Poll remaining threads */
+	while (thpool_p->num_threads_alive){
+		bsem_post_all(thpool_p->jobqueue.has_jobs);
+		sleep(1);
+	}
+
+	/* Job queue cleanup */
+	jobqueue_destroy(&thpool_p->jobqueue);
+	/* Deallocs */
+	int n;
+	for (n=0; n < threads_total; n++){
+		thread_destroy(thpool_p->threads[n]);
+	}
+	free(thpool_p->threads);
+	free(thpool_p);
+}
+
+
+/* Pause all threads in threadpool */
+void thpool_pause(thpool_* thpool_p) {
+	int n;
+	for (n=0; n < thpool_p->num_threads_alive; n++){
+		pthread_kill(thpool_p->threads[n]->pthread, SIGUSR1);
+	}
+}
+
+
+/* Resume all threads in threadpool */
+void thpool_resume(thpool_* thpool_p) {
+	// resuming a single threadpool hasn't been
+	// implemented yet, meanwhile this suppresses
+	// the warnings
+	(void)thpool_p;
+
+	threads_on_hold = 0;
+}
+
+
+int thpool_num_threads_working(thpool_* thpool_p){
+	return thpool_p->num_threads_working;
+}
+
+
+
+
+
+/* ============================ THREAD ============================== */
+
+
+/* Initialize a thread in the thread pool
+ *
+ * @param thread        address to the pointer of the thread to be created
+ * @param id            id to be given to the thread
+ * @return 0 on success, -1 otherwise.
+ */
+static int thread_init (thpool_* thpool_p, struct thread** thread_p, int id){
+
+	*thread_p = (struct thread*)malloc(sizeof(struct thread));
+	if (*thread_p == NULL){
+		err("thread_init(): Could not allocate memory for thread\n");
+		return -1;
+	}
+
+	(*thread_p)->thpool_p = thpool_p;
+	(*thread_p)->id = id;
+
+	pthread_create(&(*thread_p)->pthread, NULL, (void *)thread_do, (*thread_p));
+	pthread_detach((*thread_p)->pthread);
+	return 0;
+}
+
+
+/* Sets the calling thread on hold */
+static void thread_hold(int sig_id) {
+	(void)sig_id;
+	threads_on_hold = 1;
+	while (threads_on_hold){
+		sleep(1);
+	}
+}
+
+
+/* What each thread is doing
+*
+* In principle this is an endless loop. The only time this loop gets interrupted is once
+* thpool_destroy() is invoked or the program exits.
+*
+* @param  thread        thread that will run this function
+* @return nothing
+*/
+static void* thread_do(struct thread* thread_p){
+
+	/* Set thread name for profiling and debugging */
+	char thread_name[128] = {0};
+	sprintf(thread_name, "thread-pool-%d", thread_p->id);
+
+#if defined(__linux__)
+	/* Use prctl instead to prevent using _GNU_SOURCE flag and implicit declaration */
+	prctl(PR_SET_NAME, thread_name);
+#elif defined(__APPLE__) && defined(__MACH__)
+	pthread_setname_np(thread_name);
+#else
+	err("thread_do(): pthread_setname_np is not supported on this system");
+#endif
+
+	/* Assure all threads have been created before starting serving */
+	thpool_* thpool_p = thread_p->thpool_p;
+
+	/* Register signal handler */
+	struct sigaction act;
+	sigemptyset(&act.sa_mask);
+	act.sa_flags = 0;
+	act.sa_handler = thread_hold;
+	if (sigaction(SIGUSR1, &act, NULL) == -1) {
+		err("thread_do(): cannot handle SIGUSR1");
+	}
+
+	/* Mark thread as alive (initialized) */
+	pthread_mutex_lock(&thpool_p->thcount_lock);
+	thpool_p->num_threads_alive += 1;
+	pthread_mutex_unlock(&thpool_p->thcount_lock);
+
+	while(threads_keepalive){
+
+		bsem_wait(thpool_p->jobqueue.has_jobs);
+
+		if (threads_keepalive){
+
+			pthread_mutex_lock(&thpool_p->thcount_lock);
+			thpool_p->num_threads_working++;
+			pthread_mutex_unlock(&thpool_p->thcount_lock);
+
+			/* Read job from queue and execute it */
+			void (*func_buff)(void*);
+			void* arg_buff;
+			job* job_p = jobqueue_pull(&thpool_p->jobqueue);
+			if (job_p) {
+				func_buff = job_p->function;
+				arg_buff = job_p->arg;
+				func_buff(arg_buff);
+				free(job_p);
+			}
+
+			pthread_mutex_lock(&thpool_p->thcount_lock);
+			thpool_p->num_threads_working--;
+			if (!thpool_p->num_threads_working) {
+				pthread_cond_signal(&thpool_p->threads_all_idle);
+			}
+			pthread_mutex_unlock(&thpool_p->thcount_lock);
+
+		}
+	}
+	pthread_mutex_lock(&thpool_p->thcount_lock);
+	thpool_p->num_threads_alive --;
+	pthread_mutex_unlock(&thpool_p->thcount_lock);
+
+	return NULL;
+}
+
+
+/* Frees a thread */
+static void thread_destroy (thread* thread_p){
+	free(thread_p);
+}
+
+
+
+
+
+/* ============================ JOB QUEUE =========================== */
+
+
+/* Initialize queue */
+static int jobqueue_init(jobqueue* jobqueue_p){
+	jobqueue_p->len = 0;
+	jobqueue_p->front = NULL;
+	jobqueue_p->rear = NULL;
+
+	jobqueue_p->has_jobs = (struct bsem*)malloc(sizeof(struct bsem));
+	if (jobqueue_p->has_jobs == NULL){
+		return -1;
+	}
+
+	pthread_mutex_init(&(jobqueue_p->rwmutex), NULL);
+	bsem_init(jobqueue_p->has_jobs, 0);
+
+	return 0;
+}
+
+
+/* Clear the queue */
+static void jobqueue_clear(jobqueue* jobqueue_p){
+
+	while(jobqueue_p->len){
+		free(jobqueue_pull(jobqueue_p));
+	}
+
+	jobqueue_p->front = NULL;
+	jobqueue_p->rear = NULL;
+	bsem_reset(jobqueue_p->has_jobs);
+	jobqueue_p->len = 0;
+
+}
+
+
+/* Add (allocated) job to queue
+ */
+static void jobqueue_push(jobqueue* jobqueue_p, struct job* newjob){
+
+	pthread_mutex_lock(&jobqueue_p->rwmutex);
+	newjob->prev = NULL;
+
+	switch(jobqueue_p->len){
+
+		case 0:  /* if no jobs in queue */
+			jobqueue_p->front = newjob;
+			jobqueue_p->rear = newjob;
+			break;
+
+		default: /* if jobs in queue */
+			jobqueue_p->rear->prev = newjob;
+			jobqueue_p->rear = newjob;
+
+	}
+	jobqueue_p->len++;
+
+	bsem_post(jobqueue_p->has_jobs);
+	pthread_mutex_unlock(&jobqueue_p->rwmutex);
+}
+
+
+/* Get first job from queue (removes it from queue)
+ */
+static struct job* jobqueue_pull(jobqueue* jobqueue_p){
+
+	pthread_mutex_lock(&jobqueue_p->rwmutex);
+	job* job_p = jobqueue_p->front;
+
+	switch(jobqueue_p->len){
+
+		case 0:  /* if no jobs in queue */
+			break;
+
+		case 1:  /* if one job in queue */
+			jobqueue_p->front = NULL;
+			jobqueue_p->rear = NULL;
+			jobqueue_p->len = 0;
+			break;
+
+		default: /* if >1 jobs in queue */
+			jobqueue_p->front = job_p->prev;
+			jobqueue_p->len--;
+			/* more than one job in queue -> post it */
+			bsem_post(jobqueue_p->has_jobs);
+
+	}
+
+	pthread_mutex_unlock(&jobqueue_p->rwmutex);
+	return job_p;
+}
+
+
+/* Free all queue resources back to the system */
+static void jobqueue_destroy(jobqueue* jobqueue_p){
+	jobqueue_clear(jobqueue_p);
+	free(jobqueue_p->has_jobs);
+}
+
+
+
+
+
+/* ======================== SYNCHRONISATION ========================= */
+
+
+/* Init semaphore to 1 or 0 */
+static void bsem_init(bsem *bsem_p, int value) {
+	if (value < 0 || value > 1) {
+		err("bsem_init(): Binary semaphore can take only values 1 or 0");
+		exit(1);
+	}
+	pthread_mutex_init(&(bsem_p->mutex), NULL);
+	pthread_cond_init(&(bsem_p->cond), NULL);
+	bsem_p->v = value;
+}
+
+
+/* Reset semaphore to 0 */
+static void bsem_reset(bsem *bsem_p) {
+	bsem_init(bsem_p, 0);
+}
+
+
+/* Post to at least one thread */
+static void bsem_post(bsem *bsem_p) {
+	pthread_mutex_lock(&bsem_p->mutex);
+	bsem_p->v = 1;
+	pthread_cond_signal(&bsem_p->cond);
+	pthread_mutex_unlock(&bsem_p->mutex);
+}
+
+
+/* Post to all threads */
+static void bsem_post_all(bsem *bsem_p) {
+	pthread_mutex_lock(&bsem_p->mutex);
+	bsem_p->v = 1;
+	pthread_cond_broadcast(&bsem_p->cond);
+	pthread_mutex_unlock(&bsem_p->mutex);
+}
+
+
+/* Wait on semaphore until it has value 1, then consume it (reset to 0) */
+static void bsem_wait(bsem* bsem_p) {
+	pthread_mutex_lock(&bsem_p->mutex);
+	while (bsem_p->v != 1) {
+		pthread_cond_wait(&bsem_p->cond, &bsem_p->mutex);
+	}
+	bsem_p->v = 0;
+	pthread_mutex_unlock(&bsem_p->mutex);
+}
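
The stream->read() calls touched throughout this patch (core/null.c, importer/resolver.c, routing/online.c) all gain the same fourth argument, which the "// timeout" comment in the null.c hunk suggests is a timeout in seconds, with a timed-out read returning true and zero bytes. A condensed sketch of the resulting connection loop; stream_read() and null_shutting_down are hypothetical stand-ins for session.default_stream->read and the daemon's shutdown flag, drawn from the hunks above rather than quoted from the tree:

	#include <stdlib.h> /* free, size_t */

	extern volatile int null_shutting_down; /* assumed flag set by ipfs_null_shutdown() */

	/* hypothetical stand-in for session.default_stream->read */
	int stream_read(unsigned char** results, size_t* bytes_read, int timeout_secs);

	void connection_loop(void) {
		for (;;) {
			unsigned char* results = NULL;
			size_t bytes_read = 0;
			if (!stream_read(&results, &bytes_read, 5)) {
				break;             /* hard error: drop the connection */
			}
			if (null_shutting_down) {
				break;             /* daemon stopping: let this worker go idle */
			}
			if (bytes_read == 0) { /* timeout: nothing arrived within 5 seconds */
				continue;          /* loop so the shutdown flag gets re-checked */
			}
			/* ... dispatch on the protocol named in results, then ... */
			free(results);
		}
	}

The timeout is what lets the pool shut down cleanly: a worker blocked indefinitely in read() would never observe null_shutting_down, and thpool_destroy() would spin in its "Poll remaining threads" loop waiting for num_threads_alive to reach zero.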