diff --git a/.gitignore b/.gitignore index ab89455..ce47bf4 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,9 @@ .settings/language.settings.xml test/test_ipfs main/ipfs +test/test1.txt +test/test2.txt +test/scripts/hello.bin +test/scripts/hello2.bin +test/scripts/testlog.txt +test/scripts/generate_file diff --git a/blocks/block.c b/blocks/block.c index bdd1381..a8dbf2a 100644 --- a/blocks/block.c +++ b/blocks/block.c @@ -175,9 +175,19 @@ int ipfs_block_free(struct Block* block) { */ struct Block* ipfs_block_copy(struct Block* original) { struct Block* copy = ipfs_block_new(); - copy->data_length = original->data_length; - copy->data = (unsigned char*) malloc(original->data_length); - memcpy(copy->data, original->data, original->data_length); - copy->cid = ipfs_cid_copy(original->cid); + if (copy != NULL) { + copy->data_length = original->data_length; + copy->data = (unsigned char*) malloc(original->data_length); + if (copy->data == NULL) { + ipfs_block_free(copy); + return NULL; + } + memcpy(copy->data, original->data, original->data_length); + copy->cid = ipfs_cid_copy(original->cid); + if (copy->cid == NULL) { + ipfs_block_free(copy); + return NULL; + } + } return copy; } diff --git a/blocks/blockstore.c b/blocks/blockstore.c index a53836f..2e3f5e3 100644 --- a/blocks/blockstore.c +++ b/blocks/blockstore.c @@ -102,6 +102,8 @@ char* ipfs_blockstore_path_get(const struct FSRepo* fs_repo, const char* filenam } int complete_filename_size = strlen(filepath) + strlen(filename) + 2; char* complete_filename = (char*)malloc(complete_filename_size); + if (complete_filename == NULL) + return NULL; retVal = os_utils_filepath_join(filepath, filename, complete_filename, complete_filename_size); return complete_filename; } @@ -343,10 +345,20 @@ int ipfs_blockstore_get_node(const unsigned char* hash, size_t hash_length, stru size_t bytes_read = fread(buffer, 1, file_size, file); fclose(file); - int retVal = ipfs_hashtable_node_protobuf_decode(buffer, bytes_read, node); + // now we have the block, convert it to a node + struct Block* block; + if (!ipfs_blocks_block_protobuf_decode(buffer, bytes_read, &block)) { + free(key); + free(filename); + ipfs_block_free(block); + return 0; + } + + int retVal = ipfs_hashtable_node_protobuf_decode(block->data, block->data_length, node); free(key); free(filename); + ipfs_block_free(block); return retVal; } diff --git a/cid/cid.c b/cid/cid.c index dcfd281..fdfaa78 100644 --- a/cid/cid.c +++ b/cid/cid.c @@ -147,6 +147,10 @@ struct Cid* ipfs_cid_copy(const struct Cid* original) { copy->version = original->version; copy->hash_length = original->hash_length; copy->hash = (unsigned char*) malloc(original->hash_length); + if (copy->hash == NULL) { + ipfs_cid_free(copy); + return NULL; + } memcpy(copy->hash, original->hash, original->hash_length); } return copy; diff --git a/core/Makefile b/core/Makefile index e2ef9a4..3786d50 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1,5 +1,5 @@ CC = gcc -CFLAGS = -O0 -I../include -I../../c-libp2p/include -I../../c-multiaddr/include -I../../c-protobuf -Wall -std=c99 +CFLAGS = -O0 -I../include -I../../c-libp2p/include -I../../c-multiaddr/include -I../../c-protobuf -Wall -std=c11 ifdef DEBUG CFLAGS += -g3 diff --git a/core/api.c b/core/api.c index ec7d76f..31cd69b 100644 --- a/core/api.c +++ b/core/api.c @@ -326,6 +326,47 @@ struct HttpRequest* api_build_http_request(struct s_request* req) { return request; } +/** + * Write bytes into chunks. + * @param socket, buffer array, length + * @returns 1 when success or 0 if it fails. 
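+ * Note (added documentation, describing the behavior of the function below): the payload
+ * is written using HTTP chunked transfer encoding - each chunk is prefixed with its size
+ * in hex followed by CRLF, and the final chunk is terminated with "0\r\n\r\n".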
+ */ +int api_send_resp_chunks(int fd, void *buf, size_t size) +{ + char head[20]; + size_t s; + int l; + struct iovec iov[3]; + + // will be reused in each write, so defined only once. + iov[2].iov_base = "\r\n"; + iov[2].iov_len = 2; + + while (size > 0) { + s = size > MAX_CHUNK ? MAX_CHUNK : size; // write only MAX_CHUNK at once + l = snprintf(head, sizeof head, "%x\r\n", (unsigned int)s); + if (l <= 0) + return 0; // fail at snprintf + + iov[0].iov_base = head; + iov[0].iov_len = l; // head length. + iov[1].iov_base = buf; + iov[1].iov_len = s; + + buf += s; + size -= s; + + if (size == 0) { // last chunk + iov[2].iov_base = "\r\n0\r\n\r\n"; + iov[2].iov_len = 7; + } + libp2p_logger_debug("api", "writing chunk block of %d bytes\n", s); + if (writev(fd, iov, 3) == -1) + return 0; // fail writing. + } + return 1; +} + /** * Pthread to take care of each client connection. * @param ptr an ApiConnectionParam @@ -353,7 +394,8 @@ void *api_connection_thread (void *ptr) } r = read(s, buf, sizeof buf); if (r <= 0) { - libp2p_logger_error("api", "Read from client fail.\n"); + // this is a common occurrence, so moved from error to debug + libp2p_logger_debug("api", "Read from client fail.\n"); goto quit; } buf[r] = '\0'; @@ -511,22 +553,20 @@ void *api_connection_thread (void *ptr) // 404 write_str(s, HTTP_404); } else { - snprintf(resp, sizeof(resp), "%s 200 OK\r\n" \ + snprintf(resp, MAX_READ+1, "%s 200 OK\r\n" \ "Content-Type: %s\r\n" "Server: c-ipfs/0.0.0-dev\r\n" "X-Chunked-Output: 1\r\n" "Connection: close\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" - "%x\r\n" - "%s\r\n" - "0\r\n\r\n" - ,req.buf + req.http_ver, http_response->content_type, (unsigned int)http_response->bytes_size, http_response->bytes); - ipfs_core_http_response_free(http_response); + ,req.buf + req.http_ver, http_response->content_type); write_str (s, resp); + api_send_resp_chunks(s, http_response->bytes, http_response->bytes_size); libp2p_logger_debug("api", "resp = {\n%s\n}\n", resp); } ipfs_core_http_request_free(http_request); + ipfs_core_http_response_free(http_response); } else { // uh oh... something went wrong converting to the HttpRequest struct libp2p_logger_error("api", "Unable to build HttpRequest struct.\n"); @@ -686,6 +726,8 @@ int api_start (struct IpfsNode* local_node, int max_conns, int timeout) } local_node->api_context->ipv4 = hostname_to_ip(ip); // api is listening only on loopback. 
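+	// (added comment) the ip string is no longer needed once it has been converted
+	// to a numeric address above; release it here to avoid leaking it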
+ if (ip != NULL) + free(ip); local_node->api_context->port = port; if ((s = socket_listen(socket_tcp4(), &(local_node->api_context->ipv4), &(local_node->api_context->port))) <= 0) { diff --git a/core/client_api.c b/core/client_api.c index 8e59f3e..da1af47 100644 --- a/core/client_api.c +++ b/core/client_api.c @@ -28,16 +28,23 @@ int api_running(struct IpfsNode* local_node) { portno = multiaddress_get_ip_port(my_multiaddress); multiaddress_get_ip_address(my_multiaddress, &ip); + multiaddress_free(my_multiaddress); + + if (ip == NULL) + return 0; + int sockfd; struct sockaddr_in serv_addr; struct hostent *server; sockfd = socket(AF_INET, SOCK_STREAM, 0); if (sockfd < 0) { + free(ip); return 0; } server = gethostbyname(ip); + free(ip); if (server == NULL) { return 0; diff --git a/core/http_request.c b/core/http_request.c index 1dbdb98..0e8ac89 100644 --- a/core/http_request.c +++ b/core/http_request.c @@ -48,7 +48,6 @@ void ipfs_core_http_request_free(struct HttpRequest* request) { libp2p_utils_vector_free(request->params); } if (request->arguments != NULL) { - // arguments should not be dynamically allocated //for(int i = 0; i < request->arguments->total; i++) { // free((char*)libp2p_utils_vector_get(request->arguments, i)); //} @@ -127,6 +126,10 @@ int ipfs_core_http_process_name(struct IpfsNode* local_node, struct HttpRequest* struct HttpResponse* res = *response; res->content_type = "application/json"; res->bytes = (uint8_t*) malloc(strlen(local_node->identity->peer->id) + strlen(path) + 30); + if (res->bytes == NULL) { + free(result); + return 0; + } sprintf((char*)res->bytes, "{ \"Path\": \"%s\" }", result); res->bytes_size = strlen((char*)res->bytes); } @@ -138,6 +141,8 @@ int ipfs_core_http_process_name(struct IpfsNode* local_node, struct HttpRequest* struct HttpResponse* res = *response; res->content_type = "application/json"; res->bytes = (uint8_t*) malloc(strlen(local_node->identity->peer->id) + strlen(path) + 30); + if (res->bytes == NULL) + return 0; sprintf((char*)res->bytes, "{ \"Name\": \"%s\"\n \"Value\": \"%s\" }", local_node->identity->peer->id, path); res->bytes_size = strlen((char*)res->bytes); } @@ -178,52 +183,61 @@ int ipfs_core_http_process_dht_provide(struct IpfsNode* local_node, struct HttpR char* hash = (char*)libp2p_utils_vector_get(request->arguments, i); struct Cid* cid; if (!ipfs_cid_decode_hash_from_base58((unsigned char*)hash, strlen(hash), &cid)) { + ipfs_cid_free(cid); + cid = NULL; failedCount++; continue; } if (!local_node->routing->Provide(local_node->routing, cid->hash, cid->hash_length)) { + ipfs_cid_free(cid); + cid = NULL; failedCount++; continue; } + ipfs_cid_free(cid); } *response = ipfs_core_http_response_new(); struct HttpResponse* res = *response; res->content_type = "application/json"; res->bytes = (uint8_t*) malloc(1024); - if (!failedCount) { - // complete success - // TODO: do the right thing - snprintf((char*)res->bytes, 1024, "{\n\t\"ID\": \"\"\n" \ - "\t\"Type\": \"\"\n" - "\t\"Responses\": [\n" - "\t\t{\n" - "\t\t\t\"ID\": \"\"\n" - "\t\t\t\"Addrs\": [\n" - "\t\t\t\t\"\"\n" - "\t\t\t]\n" - "\t\t}\n" - "\t]\n" - "\t\"Extra\": \"\"\n" - "}\n" - ); + if (res->bytes == NULL) { + res->bytes_size = 0; } else { - // at least some failed - // TODO: do the right thing - snprintf((char*)res->bytes, 1024, "{\n\t\"ID\": \"\",\n" \ - "\t\"Type\": \"\",\n" - "\t\"Responses\": [\n" - "\t\t{\n" - "\t\t\t\"ID\": \"\",\n" - "\t\t\t\"Addrs\": [\n" - "\t\t\t\t\"\"\n" - "\t\t\t]\n" - "\t\t}\n" - "\t],\n" - "\t\"Extra\": \"\"\n" - "}\n" - ); + if 
(!failedCount) { + // complete success + // TODO: do the right thing + snprintf((char*)res->bytes, 1024, "{\n\t\"ID\": \"\"\n" \ + "\t\"Type\": \"\"\n" + "\t\"Responses\": [\n" + "\t\t{\n" + "\t\t\t\"ID\": \"\"\n" + "\t\t\t\"Addrs\": [\n" + "\t\t\t\t\"\"\n" + "\t\t\t]\n" + "\t\t}\n" + "\t]\n" + "\t\"Extra\": \"\"\n" + "}\n" + ); + } else { + // at least some failed + // TODO: do the right thing + snprintf((char*)res->bytes, 1024, "{\n\t\"ID\": \"\",\n" \ + "\t\"Type\": \"\",\n" + "\t\"Responses\": [\n" + "\t\t{\n" + "\t\t\t\"ID\": \"\",\n" + "\t\t\t\"Addrs\": [\n" + "\t\t\t\t\"\"\n" + "\t\t\t]\n" + "\t\t}\n" + "\t],\n" + "\t\"Extra\": \"\"\n" + "}\n" + ); + } + res->bytes_size = strlen((char*)res->bytes); } - res->bytes_size = strlen((char*)res->bytes); return failedCount < request->arguments->total; } @@ -241,13 +255,18 @@ int ipfs_core_http_process_dht_get(struct IpfsNode* local_node, struct HttpReque char* hash = (char*)libp2p_utils_vector_get(request->arguments, i); struct Cid* cid; if (!ipfs_cid_decode_hash_from_base58((unsigned char*)hash, strlen(hash), &cid)) { + ipfs_cid_free(cid); + cid = NULL; failedCount++; continue; } if (!local_node->routing->GetValue(local_node->routing, cid->hash, cid->hash_length, (void**)&res->bytes, &res->bytes_size)) { + ipfs_cid_free(cid); + cid = NULL; failedCount++; continue; } + ipfs_cid_free(cid); //TODO: we need to handle multiple arguments } return failedCount < request->arguments->total; @@ -313,7 +332,9 @@ char* ipfs_core_http_request_build_url_start(struct IpfsNode* local_node) { sprintf(port, "%d", portInt); int len = 18 + strlen(host) + strlen(port); char* retVal = malloc(len); - sprintf(retVal, "http://%s:%s/api/v0", host, port); + if (retVal != NULL) { + sprintf(retVal, "http://%s:%s/api/v0", host, port); + } free(host); multiaddress_free(ma); return retVal; @@ -329,18 +350,20 @@ int ipfs_core_http_request_add_commands(struct HttpRequest* request, char** url) // command int addl_length = strlen(request->command) + 2; char* string1 = (char*) malloc(strlen(*url) + addl_length); - sprintf(string1, "%s/%s", *url, request->command); - free(*url); - *url = string1; - // sub_command - if (request->sub_command != NULL) { - addl_length = strlen(request->sub_command) + 2; - string1 = (char*) malloc(strlen(*url) + addl_length); - sprintf(string1, "%s/%s", *url, request->sub_command); + if (string1 != NULL) { + sprintf(string1, "%s/%s", *url, request->command); free(*url); *url = string1; + // sub_command + if (request->sub_command != NULL) { + addl_length = strlen(request->sub_command) + 2; + string1 = (char*) malloc(strlen(*url) + addl_length); + sprintf(string1, "%s/%s", *url, request->sub_command); + free(*url); + *url = string1; + } } - return 1; + return string1 != NULL; } /*** @@ -404,9 +427,10 @@ size_t curl_cb(void* ptr, size_t size, size_t nmemb, struct curl_string* str) { * @param local_node the context * @param request the request * @param result the results + * @param result_size the size of the results * @returns true(1) on success, false(0) on error */ -int ipfs_core_http_request_get(struct IpfsNode* local_node, struct HttpRequest* request, char** result) { +int ipfs_core_http_request_get(struct IpfsNode* local_node, struct HttpRequest* request, char** result, size_t *result_size) { if (request == NULL || request->command == NULL) return 0; @@ -442,13 +466,97 @@ int ipfs_core_http_request_get(struct IpfsNode* local_node, struct HttpRequest* res = curl_easy_perform(curl); curl_easy_cleanup(curl); if (res == CURLE_OK) { - if (strcmp(s.ptr, 
"404 page not found") != 0) + if (strcmp(s.ptr, "404 page not found") != 0) { *result = s.ptr; + *result_size = s.len; + } else res = -1; } else { libp2p_logger_error("http_request", "Results of [%s] returned failure. Return value: %d.\n", url, res); + if (s.ptr != NULL) + free(s.ptr); } return res == CURLE_OK; } +/** + * Do an HTTP Post to the local API + * @param local_node the context + * @param request the request + * @param result the results + * @param result_size the size of the results + * @param data the array with post data + * @param data_size the data length + * @returns true(1) on success, false(0) on error + */ +int ipfs_core_http_request_post(struct IpfsNode* local_node, struct HttpRequest* request, char** result, size_t* result_size, char *data, size_t data_size) { + if (request == NULL || request->command == NULL || data == NULL) + return 0; + + char* url = ipfs_core_http_request_build_url_start(local_node); + if (url == NULL) + return 0; + + if (!ipfs_core_http_request_add_commands(request, &url)) { + free(url); + return 0; + } + + if (!ipfs_core_http_request_add_parameters(request, &url)) { + free(url); + return 0; + } + + // do the POST using libcurl + CURL *curl; + CURLcode res; + struct curl_string s; + s.len = 0; + s.ptr = malloc(1); + s.ptr[0] = '\0'; + + struct curl_httppost *post = NULL, *last = NULL; + CURLFORMcode curl_form_ret = curl_formadd(&post, &last, + CURLFORM_COPYNAME, "filename", + CURLFORM_PTRCONTENTS, data, + CURLFORM_CONTENTTYPE, "application/octet-stream", + CURLFORM_FILENAME, "", + CURLFORM_CONTENTSLENGTH, data_size, + CURLFORM_END); + + + + if (CURL_FORMADD_OK != curl_form_ret) { + // i'm always getting curl_form_ret == 4 here + // it means CURL_FORMADD_UNKNOWN_OPTION + // what i'm doing wrong? + fprintf(stderr, "curl_form_ret = %d\n", (int)curl_form_ret); + return 0; + } + + curl = curl_easy_init(); + if (!curl) { + return 0; + } + curl_easy_setopt(curl, CURLOPT_URL, url); + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, curl_cb); + curl_easy_setopt(curl, CURLOPT_WRITEDATA, &s); + curl_easy_setopt(curl, CURLOPT_HTTPPOST, post); + res = curl_easy_perform(curl); + curl_easy_cleanup(curl); + if (res == CURLE_OK) { + if (strcmp(s.ptr, "404 page not found") != 0) { + *result = s.ptr; + *result_size = s.len; + } + else + res = -1; + } else { + //libp2p_logger_error("http_request", "Results of [%s] returned failure. Return value: %d.\n", url, res); + fprintf(stderr, "Results of [%s] returned failure. 
Return value: %d.\n", url, res); + if (s.ptr != NULL) + free(s.ptr); + } + return res == CURLE_OK; +} diff --git a/core/null.c b/core/null.c index fec2014..80abfbc 100644 --- a/core/null.c +++ b/core/null.c @@ -174,6 +174,11 @@ void* ipfs_null_listen (void *ptr) connection_param->local_node = listen_param->local_node; connection_param->port = listen_param->port; connection_param->ip = malloc(INET_ADDRSTRLEN); + if (connection_param->ip == NULL) { + // we are out of memory + free(connection_param); + continue; + } if (inet_ntop(AF_INET, &(listen_param->ipv4), connection_param->ip, INET_ADDRSTRLEN) == NULL) { free(connection_param->ip); connection_param->ip = NULL; diff --git a/core/ping.c b/core/ping.c index 33c1b72..8b6c28c 100644 --- a/core/ping.c +++ b/core/ping.c @@ -66,6 +66,10 @@ int ipfs_ping (int argc, char **argv) // perhaps they passed an IP and port if (argc >= 3) { char* str = malloc(strlen(argv[2]) + strlen(argv[3]) + 100); + if (str == NULL) { + // memory issue + goto exit; + } sprintf(str, "/ip4/%s/tcp/%s", argv[2], argv[3]); peer_to_ping = libp2p_peer_new(); if (peer_to_ping) { diff --git a/exchange/bitswap/message.c b/exchange/bitswap/message.c index b815ba5..01ab22e 100644 --- a/exchange/bitswap/message.c +++ b/exchange/bitswap/message.c @@ -519,6 +519,10 @@ int ipfs_bitswap_message_protobuf_encode(const struct BitswapMessage* message, u // protobuf it size_t temp_size = ipfs_blocks_block_protobuf_encode_size(entry); uint8_t* temp = (uint8_t*) malloc(temp_size); + if (temp == NULL) { + // memory issues + return 0; + } if (!ipfs_blocks_block_protobuf_encode(entry, temp, temp_size, &temp_size)) { free(temp); return 0; @@ -536,6 +540,9 @@ int ipfs_bitswap_message_protobuf_encode(const struct BitswapMessage* message, u if (message->wantlist != NULL) { size_t temp_size = ipfs_bitswap_wantlist_protobuf_encode_size(message->wantlist); uint8_t* temp = (uint8_t*) malloc(temp_size); + if (temp == NULL) { + return 0; + } if (!ipfs_bitswap_wantlist_protobuf_encode(message->wantlist, temp, temp_size, &temp_size)) { free(temp); return 0; @@ -665,6 +672,9 @@ int ipfs_bitswap_message_add_wantlist_items(struct BitswapMessage* message, stru struct WantlistEntry* entry = ipfs_bitswap_wantlist_entry_new(); entry->block_size = ipfs_cid_protobuf_encode_size(cidEntry->cid); entry->block = (unsigned char*) malloc(entry->block_size); + if (entry->block == NULL) { + return 0; + } if (!ipfs_cid_protobuf_encode(cidEntry->cid, entry->block, entry->block_size, &entry->block_size)) { // TODO: we should do more than return a half-baked list return 0; diff --git a/importer/Makefile b/importer/Makefile index de55e1f..a749dc7 100644 --- a/importer/Makefile +++ b/importer/Makefile @@ -1,5 +1,5 @@ CC = gcc -CFLAGS = -O0 -I../include -I../../c-libp2p/include -I../../c-multihash/include -I../../c-multiaddr/include -I../../c-protobuf -Wall -std=c99 +CFLAGS = -O0 -I../include -I../../c-libp2p/include -I../../c-multihash/include -I../../c-multiaddr/include -I../../c-protobuf -Wall -std=c11 ifdef DEBUG CFLAGS += -g3 diff --git a/importer/exporter.c b/importer/exporter.c index b1f9892..7c4d53b 100644 --- a/importer/exporter.c +++ b/importer/exporter.c @@ -36,16 +36,20 @@ int ipfs_exporter_get_node(struct IpfsNode* local_node, const unsigned char* has goto exit; } - libp2p_logger_debug("exporter", "get_node got a value. 
Converting it to a HashtableNode\n"); // unprotobuf if (!ipfs_hashtable_node_protobuf_decode(buffer, buffer_size, result)) { - libp2p_logger_debug("exporter", "Conversion to HashtableNode not successful\n"); + libp2p_logger_error("exporter", "Conversion to HashtableNode not successful\n"); goto exit; } // copy in the hash (*result)->hash_size = hash_size; (*result)->hash = malloc(hash_size); + if ( (*result)->hash == NULL) { + // memory issue + libp2p_logger_error("exporter", "get_node: Unable to allocate memory.\n"); + goto exit; + } memcpy((*result)->hash, hash, hash_size); retVal = 1; @@ -267,7 +271,7 @@ int ipfs_exporter_object_cat_to_file(struct IpfsNode *local_node, unsigned char* * @param argv arguments * @returns true(1) on success */ -int ipfs_exporter_object_cat(struct CliArguments* args) { +int ipfs_exporter_object_cat(struct CliArguments* args, FILE* output_file) { struct IpfsNode *local_node = NULL; char* repo_dir = NULL; @@ -290,9 +294,10 @@ int ipfs_exporter_object_cat(struct CliArguments* args) { request->sub_command = "get"; request->arguments = libp2p_utils_vector_new(1); libp2p_utils_vector_add(request->arguments, hash); - int retVal = ipfs_core_http_request_get(local_node, request, &response); - if (response != NULL && strlen(response) > 0) { - fprintf(stdout, "%s", response); + size_t response_size = 0; + int retVal = ipfs_core_http_request_get(local_node, request, &response, &response_size); + if (response != NULL && response_size > 0) { + fwrite(response, 1, response_size, output_file); free(response); } else { retVal = 0; @@ -307,7 +312,7 @@ int ipfs_exporter_object_cat(struct CliArguments* args) { return 0; } - int retVal = ipfs_exporter_object_cat_to_file(local_node, cid->hash, cid->hash_length, stdout); + int retVal = ipfs_exporter_object_cat_to_file(local_node, cid->hash, cid->hash_length, output_file); ipfs_cid_free(cid); return retVal; diff --git a/importer/importer.c b/importer/importer.c index c14ac33..ed6a86b 100644 --- a/importer/importer.c +++ b/importer/importer.c @@ -1,3 +1,6 @@ +// these two for strdup +#define _GNU_SOURCE +#define __USE_GNU #include #include #include @@ -8,6 +11,7 @@ #include "libp2p/os/utils.h" #include "ipfs/cmd/cli.h" #include "ipfs/core/ipfs_node.h" +#include "ipfs/core/http_request.h" #include "ipfs/repo/fsrepo/fs_repo.h" #include "ipfs/repo/init.h" #include "ipfs/unixfs/unixfs.h" @@ -216,6 +220,12 @@ int ipfs_import_file(const char* root_dir, const char* fileName, struct Hashtabl } else { free(path); path = malloc(strlen(root_dir) + strlen(file) + 2); + if (path == NULL) { + // memory issue + if (file != NULL) + free(file); + return 0; + } os_utils_filepath_join(root_dir, file, path, strlen(root_dir) + strlen(file) + 2); new_root_dir = path; } @@ -324,6 +334,9 @@ struct FileList* ipfs_import_get_filelist(struct CliArguments* args) { continue; } struct FileList* current = (struct FileList*)malloc(sizeof(struct FileList)); + if (current == NULL) { + return NULL; + } current->next = NULL; current->file_name = args->argv[i]; // now wire it in @@ -386,32 +399,55 @@ int ipfs_import_files(struct CliArguments* args) { } ipfs_node_offline_new(repo_path, &local_node); - - // import the file(s) - current = first; - while (current != NULL) { - if (current->file_name[0] != '-') { // not a switch - os_utils_split_filename(current->file_name, &path, &filename); - size_t bytes_written = 0; - if (!ipfs_import_file(NULL, current->file_name, &directory_entry, local_node, &bytes_written, recursive)) - goto exit; - 
ipfs_import_print_node_results(directory_entry, filename); - // cleanup - if (path != NULL) { - free(path); - path = NULL; - } - if (filename != NULL) { - free(filename); - filename = NULL; - } - if (directory_entry != NULL) { - ipfs_hashtable_node_free(directory_entry); - directory_entry = NULL; - } + /** disabling for the time being + if (local_node->mode == MODE_API_AVAILABLE) { + // do this through the API + struct HttpRequest* request = ipfs_core_http_request_new(); + request->command = "add"; + struct HttpParam* recursive_param = ipfs_core_http_param_new(); + recursive_param->name = strdup("recursive"); + recursive_param->value = strdup((recursive ? "true" : "false")); + libp2p_utils_vector_add(request->params, recursive_param); + current = first; + while (current != NULL) { + libp2p_utils_vector_add(request->arguments, current->file_name); + current = current->next; } - current = current->next; - } + uint8_t* result = NULL; + size_t result_size = 0; + if (!ipfs_core_http_request_post(local_node, request, &result, &result_size, data, data_size)) { + + } + + } else { + */ + // No daemon is running. Do this without using the API + // import the file(s) + current = first; + while (current != NULL) { + if (current->file_name[0] != '-') { // not a switch + os_utils_split_filename(current->file_name, &path, &filename); + size_t bytes_written = 0; + if (!ipfs_import_file(NULL, current->file_name, &directory_entry, local_node, &bytes_written, recursive)) + goto exit; + ipfs_import_print_node_results(directory_entry, filename); + // cleanup + if (path != NULL) { + free(path); + path = NULL; + } + if (filename != NULL) { + free(filename); + filename = NULL; + } + if (directory_entry != NULL) { + ipfs_hashtable_node_free(directory_entry); + directory_entry = NULL; + } + } + current = current->next; + } + // } uncomment this line when the api is up and running with file transfer retVal = 1; exit: diff --git a/importer/resolver.c b/importer/resolver.c index 9ede07e..7ba6698 100644 --- a/importer/resolver.c +++ b/importer/resolver.c @@ -28,9 +28,16 @@ int ipfs_resolver_next_path(const char* path, char** next_part) { char* pos = strchr(&path[i+1], '/'); if (pos == NULL) { *next_part = (char*)malloc(strlen(path) + 1); + if ( *next_part == NULL) { + // memory issue + return 0; + } strcpy(*next_part, path); } else { *next_part = (char*)malloc(pos - &path[i] + 1); + if (*next_part == NULL) { + return 0; + } strncpy(*next_part, &path[i], pos-&path[i]); (*next_part)[pos-&path[i]] = 0; } @@ -145,9 +152,11 @@ struct HashtableNode* ipfs_resolver_remote_get(const char* path, struct Hashtabl message->key_size = strlen(key); size_t b58size = 100; uint8_t *b58key = (uint8_t *) malloc(b58size); - libp2p_crypto_encoding_base58_encode((unsigned char*)message->key, message->key_size, (unsigned char**) &b58key, &b58size); - libp2p_logger_debug("resolver", "Attempting to use kademlia to get key %s.\n", b58key); - free(b58key); + if (b58key == NULL) { + libp2p_crypto_encoding_base58_encode((unsigned char*)message->key, message->key_size, (unsigned char**) &b58key, &b58size); + libp2p_logger_debug("resolver", "Attempting to use kademlia to get key %s.\n", b58key); + free(b58key); + } size_t message_protobuf_size = libp2p_message_protobuf_encode_size(message); unsigned char message_protobuf[message_protobuf_size]; libp2p_message_protobuf_encode(message, message_protobuf, message_protobuf_size, &message_protobuf_size); diff --git a/include/ipfs/core/api.h b/include/ipfs/core/api.h index f0a5b93..a708d61 100644 --- 
a/include/ipfs/core/api.h +++ b/include/ipfs/core/api.h @@ -10,6 +10,7 @@ #endif #define MAX_READ (32*1024) // 32k +#define MAX_CHUNK (32*1024) // 32k struct ApiContext { int socket; @@ -86,6 +87,7 @@ struct s_request { #define cstrstart(a,b) (memcmp(a,b,sizeof(b)-1)==0) #define strstart(a,b) (memcmp(a,b,strlen(b))==0) +int api_send_resp_chunks(int fd, void *buf, size_t size); void *api_connection_thread (void *ptr); void api_connections_cleanup (struct IpfsNode* node); void *api_listen_thread (void *ptr); diff --git a/include/ipfs/core/http_request.h b/include/ipfs/core/http_request.h index cbd1101..c9b5b52 100644 --- a/include/ipfs/core/http_request.h +++ b/include/ipfs/core/http_request.h @@ -71,6 +71,19 @@ int ipfs_core_http_request_process(struct IpfsNode* local_node, struct HttpReque * @param local_node the context * @param request the request * @param result the results + * @param result_size the size of the results * @returns true(1) on success, false(0) on error */ -int ipfs_core_http_request_get(struct IpfsNode* local_node, struct HttpRequest* request, char** result); +int ipfs_core_http_request_get(struct IpfsNode* local_node, struct HttpRequest* request, char** result, size_t* result_size); + +/** + * Do an HTTP Post to the local API + * @param local_node the context + * @param request the request + * @param result the results + * @param result_size the size of the results + * @param data the array with post data + * @param data_size the data length + * @returns true(1) on success, false(0) on error + */ +int ipfs_core_http_request_post(struct IpfsNode* local_node, struct HttpRequest* request, char** result, size_t* result_size, char *data, size_t data_size); diff --git a/include/ipfs/importer/exporter.h b/include/ipfs/importer/exporter.h index 3827c66..39e6ffc 100644 --- a/include/ipfs/importer/exporter.h +++ b/include/ipfs/importer/exporter.h @@ -31,9 +31,10 @@ int ipfs_exporter_object_get(int argc, char** argv); * Called from the command line with ipfs cat [hash]. Retrieves the object pointed to by hash, and displays its block data (links and data elements) * @param argc number of arguments * @param argv arguments + * @param output_file where to stream the results * @returns true(1) on success */ -int ipfs_exporter_object_cat(struct CliArguments* args); +int ipfs_exporter_object_cat(struct CliArguments* args, FILE* output_file); /** * Retrieves the object pointed to by hash and displays the raw data diff --git a/include/ipfs/namesys/pb.h b/include/ipfs/namesys/pb.h index 5cb29f6..49615d5 100644 --- a/include/ipfs/namesys/pb.h +++ b/include/ipfs/namesys/pb.h @@ -21,10 +21,12 @@ struct namesys_pb { // setting an EOL says "this record is valid until..." 
const static IpnsEntry_ValidityType IpnsEntry_EOL = 0; +/* static char *IpnsEntry_ValidityType_name[] = { "EOL", NULL }; +*/ int IpnsEntry_ValidityType_value (char *s); struct ipns_entry* ipfs_namesys_pb_new_ipns_entry (); diff --git a/main/main.c b/main/main.c index 3e416ad..db2e1ae 100644 --- a/main/main.c +++ b/main/main.c @@ -41,6 +41,9 @@ void stripit(int argc, char** argv) { char* old_arg = argv[argc]; int full_length = strlen(old_arg); char *tmp = (char*) malloc(full_length + 1); + if (tmp == NULL) { + return; + } char* ptr1 = &old_arg[1]; strcpy(tmp, ptr1); tmp[strlen(tmp)-1] = 0; @@ -171,7 +174,7 @@ int main(int argc, char** argv) { //ipfs_exporter_get(argc, argv); //break; case (CAT): - retVal = ipfs_exporter_object_cat(args); + retVal = ipfs_exporter_object_cat(args, stdout); break; case (DNS): retVal = ipfs_dns(argc, argv); diff --git a/merkledag/merkledag.c b/merkledag/merkledag.c index d5966e1..3c2b856 100644 --- a/merkledag/merkledag.c +++ b/merkledag/merkledag.c @@ -92,7 +92,7 @@ int ipfs_merkledag_add(struct HashtableNode* node, struct FSRepo* fs_repo, size_ ipfs_block_free(block); return 0; } - + ipfs_block_free(block); // TODO: call HasBlock (unsure why as yet) return 1; } diff --git a/merkledag/node.c b/merkledag/node.c index f4ff9ff..aed331f 100644 --- a/merkledag/node.c +++ b/merkledag/node.c @@ -29,6 +29,7 @@ enum WireType ipfs_node_link_message_fields[] = { WIRETYPE_LENGTH_DELIMITED, WIR * @Param name: The name of the link (char *) * @Param size: Size of the link (size_t) * @Param ahash: An Qmhash + * @returns true(1) on success, false(0) otherwise */ int ipfs_node_link_create(char * name, unsigned char * ahash, size_t hash_size, struct NodeLink** node_link) { @@ -40,12 +41,18 @@ int ipfs_node_link_create(char * name, unsigned char * ahash, size_t hash_size, // hash link->hash_size = hash_size; link->hash = (unsigned char*)malloc(hash_size); + if (link->hash == NULL) { + ipfs_node_link_free(link); + *node_link = NULL; + return 0; + } memcpy(link->hash, ahash, hash_size); // name if (name != NULL && strlen(name) > 0) { link->name = malloc(strlen(name) + 1); if ( link->name == NULL) { - free(link); + ipfs_node_link_free(link); + *node_link = NULL; return 0; } strcpy(link->name, name); @@ -192,6 +199,9 @@ int ipfs_node_link_protobuf_decode(unsigned char* buffer, size_t buffer_length, } link->hash_size = hash_size - 2; link->hash = (unsigned char*)malloc(link->hash_size); + if (link->hash == NULL) { + goto exit; + } memcpy((char*)link->hash, (char*)&hash[2], link->hash_size); free(hash); pos += bytes_read; @@ -729,13 +739,18 @@ int Node_Resolve(char ** result, char * input1) char * tr; char * end; tr=strtok_r(input,"/",&end); + int retVal = 1; for(int i = 0;tr;i++) { result[i] = (char *) malloc(strlen(tr)+1); - strcpy(result[i], tr); + if (result[i] != NULL) { + strcpy(result[i], tr); + } else { + retVal = 0; + } tr=strtok_r(NULL,"/",&end); } - return 1; + return retVal; } /*Node_Resolve_Links @@ -751,6 +766,9 @@ struct Link_Proc * Node_Resolve_Links(struct HashtableNode * N, char * path) } int expected_link_ammount = Node_Resolve_Max_Size(path); struct Link_Proc * LProc = (struct Link_Proc *) malloc(sizeof(struct Link_Proc) + sizeof(struct NodeLink) * expected_link_ammount); + if (LProc == NULL) { + return NULL; + } LProc->ammount = 0; char * linknames[expected_link_ammount]; Node_Resolve(linknames, path); @@ -761,8 +779,10 @@ struct Link_Proc * Node_Resolve_Links(struct HashtableNode * N, char * path) if(proclink) { LProc->links[i] = (struct NodeLink 
*)malloc(sizeof(struct NodeLink)); - memcpy(LProc->links[i], proclink, sizeof(struct NodeLink)); - LProc->ammount++; + if (LProc->links[i] == NULL) { // TODO: What should we do if memory wasn't allocated here? + memcpy(LProc->links[i], proclink, sizeof(struct NodeLink)); + LProc->ammount++; + } free(proclink); } } diff --git a/namesys/name.c b/namesys/name.c index e5d450f..f0f2664 100644 --- a/namesys/name.c +++ b/namesys/name.c @@ -20,9 +20,10 @@ int ipfs_name_publish(struct IpfsNode* local_node, char* name) { request->command = "name"; request->sub_command = "publish"; libp2p_utils_vector_add(request->arguments, name); - int retVal = ipfs_core_http_request_get(local_node, request, &response); - if (response != NULL) { - fprintf(stdout, "%s", response); + size_t response_size = 0; + int retVal = ipfs_core_http_request_get(local_node, request, &response, &response_size); + if (response != NULL && response_size > 0) { + fwrite(response, 1, response_size, stdout); free(response); } ipfs_core_http_request_free(request); @@ -38,9 +39,10 @@ int ipfs_name_resolve(struct IpfsNode* local_node, char* name) { request->command = "name"; request->sub_command = "resolve"; libp2p_utils_vector_add(request->arguments, name); - int retVal = ipfs_core_http_request_get(local_node, request, &response); - if (response != NULL) { - fprintf(stdout, "%s", response); + size_t response_size = 0; + int retVal = ipfs_core_http_request_get(local_node, request, &response, &response_size); + if (response != NULL && response_size > 0) { + fwrite(response, 1, response_size, stdout); free(response); } ipfs_core_http_request_free(request); diff --git a/namesys/pb.c b/namesys/pb.c index 3fc43a0..ccc5e0b 100644 --- a/namesys/pb.c +++ b/namesys/pb.c @@ -3,6 +3,7 @@ #include "ipfs/namesys/routing.h" #include "ipfs/namesys/pb.h" +/* int IpnsEntry_ValidityType_value (char *s) { int r; @@ -19,6 +20,7 @@ int IpnsEntry_ValidityType_value (char *s) return -1; // not found. } +*/ struct ipns_entry* ipfs_namesys_pb_new_ipns_entry () { diff --git a/namesys/resolver.c b/namesys/resolver.c index 5f908cf..70f9ec8 100644 --- a/namesys/resolver.c +++ b/namesys/resolver.c @@ -41,6 +41,10 @@ int ipfs_namesys_resolver_resolve_once(struct IpfsNode* local_node, const char* if (local_node->repo->config->datastore->datastore_get(cid->hash, cid->hash_length, &record, local_node->repo->config->datastore)) { // we are able to handle this locally... 
return the results *results = (char*) malloc(record->value_size + 1); + if (*results == NULL) { + ipfs_cid_free(cid); + return 0; + } memset(*results, 0, record->value_size + 1); memcpy(*results, record->value, record->value_size); ipfs_cid_free(cid); @@ -64,6 +68,9 @@ int ipfs_namesys_resolver_resolve_once(struct IpfsNode* local_node, const char* int ipfs_namesys_resolver_resolve(struct IpfsNode* local_node, const char* path, int recursive, char** results) { char* result = NULL; char* current_path = (char*) malloc(strlen(path) + 1); + if (current_path == NULL) { + return 0; + } strcpy(current_path, path); // if we go more than 10 deep, bail @@ -84,7 +91,8 @@ int ipfs_namesys_resolver_resolve(struct IpfsNode* local_node, const char* path, // result will not be NULL free(current_path); current_path = (char*) malloc(strlen(result)+1); - strcpy(current_path, result); + if (current_path != NULL) + strcpy(current_path, result); free(result); counter++; } while(recursive && is_ipns_string(current_path)); diff --git a/namesys/routing.c b/namesys/routing.c index 9b61fd8..a8e0c99 100644 --- a/namesys/routing.c +++ b/namesys/routing.c @@ -131,6 +131,8 @@ int ipfs_namesys_hex_string_to_bytes(const unsigned char* hex, unsigned char** b // allocate memory *buffer = (unsigned char*)malloc( hex_size / 2 ); unsigned char* ptr = *buffer; + if (ptr == NULL) + return ErrAllocFailed; // convert string for(size_t i = 0; i < hex_size; i++) { diff --git a/repo/config/addresses.c b/repo/config/addresses.c index 6ef8688..aa17ca2 100644 --- a/repo/config/addresses.c +++ b/repo/config/addresses.c @@ -7,8 +7,10 @@ char* alloc_and_copy(char* source) { unsigned long strLen = strlen(source); char* result = malloc(sizeof(char) * (strLen + 1)); - strncpy(result, source, strLen); - result[strLen] = 0; + if (result != NULL) { + strncpy(result, source, strLen); + result[strLen] = 0; + } return result; } diff --git a/repo/config/config.c b/repo/config/config.c index 81a89b5..6a58ed5 100644 --- a/repo/config/config.c +++ b/repo/config/config.c @@ -139,25 +139,35 @@ int ipfs_repo_config_init(struct RepoConfig* config, unsigned int num_bits_for_k return 0; // swarm addresses - char* addr1 = malloc(27); - sprintf(addr1, "/ip4/0.0.0.0/tcp/%d", swarm_port); - config->addresses->swarm_head = libp2p_utils_linked_list_new(); - config->addresses->swarm_head->item = malloc(strlen(addr1) + 1); - strcpy(config->addresses->swarm_head->item, addr1); + char* addr1 = malloc(64); + if (addr1 != NULL) { + sprintf(addr1, "/ip4/0.0.0.0/tcp/%d", swarm_port); + config->addresses->swarm_head = libp2p_utils_linked_list_new(); + if (config->addresses->swarm_head != NULL) { + config->addresses->swarm_head->item = malloc(strlen(addr1) + 1); + if (config->addresses->swarm_head->item != NULL) { + strcpy(config->addresses->swarm_head->item, addr1); + } - sprintf(addr1, "/ip6/::/tcp/%d", swarm_port); - config->addresses->swarm_head->next = libp2p_utils_linked_list_new(); - config->addresses->swarm_head->next->item = malloc(strlen(addr1) + 1); - strcpy(config->addresses->swarm_head->next->item, addr1); - - int port_adder = swarm_port - 4001; - sprintf(addr1, "/ip4/127.0.0.1/tcp/%d", 5001 + port_adder); - config->addresses->api = malloc(strlen(addr1)+1); - strcpy(config->addresses->api, addr1); - sprintf(addr1, "/ip4/127.0.0.1/tcp/%d", 8080 + port_adder); - config->addresses->gateway = malloc(strlen(addr1+1)); - strcpy(config->addresses->gateway, addr1); - free(addr1); + sprintf(addr1, "/ip6/::/tcp/%d", swarm_port); + config->addresses->swarm_head->next = 
libp2p_utils_linked_list_new(); + if (config->addresses->swarm_head->next != NULL) { + config->addresses->swarm_head->next->item = malloc(strlen(addr1) + 1); + if (config->addresses->swarm_head->next->item != NULL) + strcpy(config->addresses->swarm_head->next->item, addr1); + } + } + int port_adder = swarm_port - 4001; + sprintf(addr1, "/ip4/127.0.0.1/tcp/%d", 5001 + port_adder); + config->addresses->api = malloc(strlen(addr1)+1); + if (config->addresses->api != NULL) + strcpy(config->addresses->api, addr1); + sprintf(addr1, "/ip4/127.0.0.1/tcp/%d", 8080 + port_adder); + config->addresses->gateway = malloc(strlen(addr1)+1); + if (config->addresses->gateway != NULL) + strcpy(config->addresses->gateway, addr1); + free(addr1); + } config->discovery.mdns.enabled = 1; config->discovery.mdns.interval = 10; diff --git a/repo/config/gateway.c b/repo/config/gateway.c index 557da32..82356e5 100644 --- a/repo/config/gateway.c +++ b/repo/config/gateway.c @@ -6,8 +6,10 @@ char* alloc_and_fill(char* source) { char* newString = malloc(sizeof(char) * (strlen(source) + 1)); - strncpy(newString, source, strlen(source)); - newString[strlen(source)] = 0; + if (newString != NULL) { + strncpy(newString, source, strlen(source)); + newString[strlen(source)] = 0; + } return newString; } diff --git a/repo/fsrepo/fs_repo.c b/repo/fsrepo/fs_repo.c index ae15a82..cc80bc6 100644 --- a/repo/fsrepo/fs_repo.c +++ b/repo/fsrepo/fs_repo.c @@ -150,7 +150,8 @@ int ipfs_repo_fsrepo_new(const char* repo_path, struct RepoConfig* config, struc } else { int len = strlen(repo_path) + 1; (*repo)->path = (char*)malloc(len); - strncpy((*repo)->path, repo_path, len); + if ( (*repo)->path != NULL) + strncpy((*repo)->path, repo_path, len); } // allocate other structures if (config != NULL) @@ -574,10 +575,13 @@ int ipfs_repo_fsrepo_node_get(const unsigned char* hash, size_t hash_length, voi if (retVal == 1) { *node_size = ipfs_hashtable_node_protobuf_encode_size(node); *node_obj = malloc(*node_size); + if (*node_obj == NULL) { + ipfs_hashtable_node_free(node); + return 0; + } retVal = ipfs_hashtable_node_protobuf_encode(node, *node_obj, *node_size, node_size); } - if (node != NULL) - ipfs_hashtable_node_free(node); + ipfs_hashtable_node_free(node); return retVal; } diff --git a/repo/fsrepo/lmdb_datastore.c b/repo/fsrepo/lmdb_datastore.c index a8431af..893f492 100644 --- a/repo/fsrepo/lmdb_datastore.c +++ b/repo/fsrepo/lmdb_datastore.c @@ -220,6 +220,10 @@ int repo_fsrepo_lmdb_put(struct DatastoreRecord* datastore_record, const struct journalstore_record = lmdb_journal_record_new(); journalstore_record->hash_size = datastore_record->key_size; journalstore_record->hash = malloc(datastore_record->key_size); + if (journalstore_record->hash == NULL) { + libp2p_logger_error("lmdb_datastore", "put: Unable to allocate memory for key.\n"); + return 0; + } memcpy(journalstore_record->hash, datastore_record->key, datastore_record->key_size); journalstore_record->timestamp = datastore_record->timestamp; // look up the corresponding journalstore record for possible updating @@ -264,17 +268,23 @@ int repo_fsrepo_lmdb_put(struct DatastoreRecord* datastore_record, const struct // add it to the journalstore journalstore_record = lmdb_journal_record_new(); journalstore_record->hash = (uint8_t*) malloc(datastore_record->key_size); - memcpy(journalstore_record->hash, datastore_record->key, datastore_record->key_size); - journalstore_record->hash_size = datastore_record->key_size; - journalstore_record->timestamp = datastore_record->timestamp; - 
journalstore_record->pending = 1; // TODO: Calculate this correctly - journalstore_record->pin = 1; - if (!lmdb_journalstore_journal_add(journalstore_cursor, journalstore_record)) { - libp2p_logger_error("lmdb_datastore", "Datastore record was added, but problem adding Journalstore record. Continuing.\n"); + if (journalstore_record->hash == NULL) { + libp2p_logger_error("lmdb_datastore", "Unable to allocate memory to add record to journalstore.\n"); + lmdb_journalstore_cursor_close(journalstore_cursor, 0); + lmdb_journal_record_free(journalstore_record); + } else { + memcpy(journalstore_record->hash, datastore_record->key, datastore_record->key_size); + journalstore_record->hash_size = datastore_record->key_size; + journalstore_record->timestamp = datastore_record->timestamp; + journalstore_record->pending = 1; // TODO: Calculate this correctly + journalstore_record->pin = 1; + if (!lmdb_journalstore_journal_add(journalstore_cursor, journalstore_record)) { + libp2p_logger_error("lmdb_datastore", "Datastore record was added, but problem adding Journalstore record. Continuing.\n"); + } + lmdb_journalstore_cursor_close(journalstore_cursor, 0); + lmdb_journal_record_free(journalstore_record); + retVal = 1; } - lmdb_journalstore_cursor_close(journalstore_cursor, 0); - lmdb_journal_record_free(journalstore_record); - retVal = 1; } } else { // datastore record was unable to be added. @@ -321,10 +331,25 @@ int repo_fsrepro_lmdb_open(int argc, char** argv, struct Datastore* datastore) { } struct lmdb_context *db_context = (struct lmdb_context *) malloc(sizeof(struct lmdb_context)); + if (db_context == NULL) { + mdb_env_close(mdb_env); + return 0; + } datastore->datastore_context = (void*) db_context; db_context->db_environment = (void*)mdb_env; db_context->datastore_db = (MDB_dbi*) malloc(sizeof(MDB_dbi)); + if (db_context->datastore_db == NULL) { + mdb_env_close(mdb_env); + free(db_context); + return 0; + } db_context->journal_db = (MDB_dbi*) malloc(sizeof(MDB_dbi)); + if (db_context->journal_db == NULL) { + free(db_context->datastore_db); + free(db_context); + mdb_env_close(mdb_env); + return 0; + } // open the 2 databases if (mdb_txn_begin(mdb_env, NULL, 0, &db_context->current_transaction) != 0) { diff --git a/repo/fsrepo/lmdb_journalstore.c b/repo/fsrepo/lmdb_journalstore.c index 7eba1f1..1a0c3a2 100644 --- a/repo/fsrepo/lmdb_journalstore.c +++ b/repo/fsrepo/lmdb_journalstore.c @@ -101,8 +101,12 @@ int lmdb_journalstore_build_record(const struct MDB_val* db_key, const struct MD } rec->hash_size = db_value->mv_size - 2; rec->hash = malloc(rec->hash_size); - uint8_t *val = (uint8_t*)db_value->mv_data; - memcpy(rec->hash, &val[2], rec->hash_size); + if (rec->hash != NULL) { + uint8_t *val = (uint8_t*)db_value->mv_data; + memcpy(rec->hash, &val[2], rec->hash_size); + } else { + return 0; + } return 1; } diff --git a/repo/init.c b/repo/init.c index dfbf77c..1669070 100644 --- a/repo/init.c +++ b/repo/init.c @@ -95,7 +95,8 @@ int make_ipfs_repository(const char* path, int swarm_port, struct Libp2pVector* printf("peer identity: %s\n", fs_repo->config->identity->peer->id); if (peer_id != NULL) { *peer_id = malloc(fs_repo->config->identity->peer->id_size + 1); - strcpy(*peer_id, fs_repo->config->identity->peer->id); + if (*peer_id != NULL) + strcpy(*peer_id, fs_repo->config->identity->peer->id); } // make sure the repository exists diff --git a/routing/k_routing.c b/routing/k_routing.c index 084f1e3..622fbd4 100644 --- a/routing/k_routing.c +++ b/routing/k_routing.c @@ -50,6 +50,7 @@ int 
ipfs_routing_kademlia_get_value(struct IpfsRouting* routing, const unsigned * @returns true(1) on success, otherwise false(0) */ int ipfs_routing_kademlia_find_providers(struct IpfsRouting* routing, const unsigned char* key, size_t key_size, struct Libp2pVector** results) { + int retVal = 1; *results = libp2p_utils_vector_new(1); struct Libp2pVector* vector = *results; // see if I can provide it @@ -70,26 +71,30 @@ int ipfs_routing_kademlia_find_providers(struct IpfsRouting* routing, const unsi if (vector->total == 0) { // search requires null terminated key char* key_nt = malloc(key_size + 1); - strncpy(key_nt, (char*)key, key_size); - key_nt[key_size] = 0; - struct MultiAddress** list = search_kademlia(key_nt, 3); - free(key_nt); - if (list != NULL) { - int i = 0; - while (list[i] != NULL) { - struct MultiAddress* current = list[i]; - libp2p_utils_vector_add(vector, current); - i++; + if (key_nt != NULL) { + strncpy(key_nt, (char*)key, key_size); + key_nt[key_size] = 0; + struct MultiAddress** list = search_kademlia(key_nt, 3); + free(key_nt); + if (list != NULL) { + int i = 0; + while (list[i] != NULL) { + struct MultiAddress* current = list[i]; + libp2p_utils_vector_add(vector, current); + i++; + } } + } else { + retVal = 0; } } if (vector->total == 0) { // we were unable to find it, even on the network libp2p_utils_vector_free(vector); vector = NULL; - return 0; + retVal = 0; } - return 1; + return retVal; } /** diff --git a/routing/offline.c b/routing/offline.c index 901a0bf..3744f0c 100644 --- a/routing/offline.c +++ b/routing/offline.c @@ -68,11 +68,14 @@ int ipfs_routing_generic_get_value (ipfs_routing* routing, const unsigned char * req->sub_command = "get"; req->arguments = libp2p_utils_vector_new(1); libp2p_utils_vector_add(req->arguments, buffer); - if (!ipfs_core_http_request_get(routing->local_node, req, &response)) { + size_t response_size = 0; + if (!ipfs_core_http_request_get(routing->local_node, req, &response, &response_size)) { libp2p_logger_error("offline", "Unable to call API for dht get.\n"); + ipfs_core_http_request_free(req); return 0; } - *vlen = strlen(response); + ipfs_core_http_request_free(req); + *vlen = response_size; if (*vlen > 0) { *val = malloc(*vlen + 1); uint8_t* ptr = (uint8_t*)*val; @@ -94,6 +97,8 @@ int ipfs_routing_generic_get_value (ipfs_routing* routing, const unsigned char * // protobuf the node int protobuf_size = ipfs_hashtable_node_protobuf_encode_size(node); *val = malloc(protobuf_size); + if (*val == NULL) + goto exit; if (ipfs_hashtable_node_protobuf_encode(node, *val, protobuf_size, vlen) == 0) { goto exit; @@ -174,13 +179,15 @@ int ipfs_routing_offline_provide (ipfs_routing* offlineRouting, const unsigned c struct HttpRequest* request = ipfs_core_http_request_new(); request->command = "dht"; request->sub_command = "provide"; - request->arguments = libp2p_utils_vector_new(1); libp2p_utils_vector_add(request->arguments, buffer); - if (!ipfs_core_http_request_get(offlineRouting->local_node, request, &response)) { + size_t response_size = 0; + if (!ipfs_core_http_request_get(offlineRouting->local_node, request, &response, &response_size)) { libp2p_logger_error("offline", "Unable to call API for dht publish.\n"); + ipfs_core_http_request_free(request); return 0; } - fprintf(stdout, "%s", response); + ipfs_core_http_request_free(request); + fwrite(response, 1, response_size, stdout); return 1; } else { libp2p_logger_debug("offline", "Unable to announce that I can provide the hash, as API not available.\n"); diff --git a/routing/online.c 
b/routing/online.c index 3b1fe91..c4a1a60 100644 --- a/routing/online.c +++ b/routing/online.c @@ -53,12 +53,20 @@ int ipfs_routing_online_find_remote_providers(struct IpfsRouting* routing, const message->message_type = MESSAGE_TYPE_GET_PROVIDERS; message->key_size = key_size; message->key = malloc(message->key_size); + if (message->key == NULL) { + libp2p_message_free(message); + return 0; + } memcpy(message->key, key, message->key_size); - size_t b58size = 100; - uint8_t *b58key = (uint8_t *) malloc(b58size); - libp2p_crypto_encoding_base58_encode((unsigned char*)message->key, message->key_size, (unsigned char**) &b58key, &b58size); - libp2p_logger_debug("online", "find_remote_providers looking for key %s.\n", b58key); - free(b58key); + if (libp2p_logger_watching_class("online")) { + size_t b58size = 100; + uint8_t *b58key = (uint8_t *) malloc(b58size); + if (b58key != NULL) { + libp2p_crypto_encoding_base58_encode((unsigned char*)message->key, message->key_size, (unsigned char**) &b58key, &b58size); + libp2p_logger_debug("online", "find_remote_providers looking for key %s.\n", b58key); + free(b58key); + } + } // loop through the connected peers, asking for the hash struct Libp2pLinkedList* current_entry = routing->local_node->peerstore->head_entry; while (current_entry != NULL) { @@ -246,6 +254,10 @@ int ipfs_routing_online_provide(struct IpfsRouting* routing, const unsigned char struct KademliaMessage* msg = libp2p_message_new(); msg->key_size = key_size; msg->key = malloc(msg->key_size); + if (msg->key == NULL) { + libp2p_message_free(msg); + return 0; + } memcpy(msg->key, key, msg->key_size); msg->message_type = MESSAGE_TYPE_ADD_PROVIDER; msg->provider_peer_head = libp2p_utils_linked_list_new(); @@ -333,6 +345,10 @@ int ipfs_routing_online_get_peer_value(ipfs_routing* routing, const struct Libp2 struct KademliaMessage* msg = libp2p_message_new(); msg->key_size = key_size; msg->key = malloc(msg->key_size); + if (msg->key == NULL) { + libp2p_message_free(msg); + return 0; + } memcpy(msg->key, key, msg->key_size); msg->message_type = MESSAGE_TYPE_GET_VALUE; @@ -390,7 +406,7 @@ int ipfs_routing_online_get_value (ipfs_routing* routing, const unsigned char *k if (current_peer->is_local) { // it's a local fetch. Retrieve it libp2p_logger_debug("online", "It is a local fetch. 
Attempting get_value locally.\n"); - if (ipfs_routing_generic_get_value(routing, key, key_size, buffer, buffer_size) == 0) { + if (ipfs_routing_generic_get_value(routing, key, key_size, buffer, buffer_size)) { retVal = 1; break; } diff --git a/test/config.test1 b/test/config.test1 index f0296b8..33e4fe5 100644 --- a/test/config.test1 +++ b/test/config.test1 @@ -20,7 +20,7 @@ "/ip6/::/tcp/4001" ], "API": "/ip4/127.0.0.1/tcp/5002", - "Gateway": "(null)" + "Gateway": "/ip4/127.0.0.1/tcp/8080" }, "Mounts": { "IPFS": "/ipfs", diff --git a/test/config.test1.wo_journal b/test/config.test1.wo_journal index 44e442e..29e2dee 100644 --- a/test/config.test1.wo_journal +++ b/test/config.test1.wo_journal @@ -20,7 +20,7 @@ "/ip6/::/tcp/4001" ], "API": "/ip4/127.0.0.1/tcp/5001", - "Gateway": "(null)" + "Gateway": "/ip4/127.0.0.1/tcp/8080" }, "Mounts": { "IPFS": "/ipfs", diff --git a/test/config.test2 b/test/config.test2 index 87e9467..47e1336 100644 --- a/test/config.test2 +++ b/test/config.test2 @@ -20,7 +20,7 @@ "/ip6/::/tcp/4002" ], "API": "/ip4/127.0.0.1/tcp/5002", - "Gateway": "(null)" + "Gateway": "/ip4/127.0.0.1/tcp/8080" }, "Mounts": { "IPFS": "/ipfs", diff --git a/test/config.test2.wo_journal b/test/config.test2.wo_journal index d21ed94..266d45f 100644 --- a/test/config.test2.wo_journal +++ b/test/config.test2.wo_journal @@ -20,7 +20,7 @@ "/ip6/::/tcp/4002" ], "API": "/ip4/127.0.0.1/tcp/5002", - "Gateway": "(null)" + "Gateway": "/ip4/127.0.0.1/tcp/8080" }, "Mounts": { "IPFS": "/ipfs", diff --git a/test/core/test_api.h b/test/core/test_api.h index c57f198..5f0d69b 100644 --- a/test/core/test_api.h +++ b/test/core/test_api.h @@ -128,7 +128,7 @@ int test_core_api_object_cat() { // use a client to ask for the file on server 1 arguments = cli_arguments_new(5, args); - if (ipfs_exporter_object_cat(arguments) == 0) { + if (ipfs_exporter_object_cat(arguments, stdout) == 0) { libp2p_logger_error("test_api", "ipfs_exporter_object_cat returned false.\n"); goto exit; } @@ -144,6 +144,178 @@ int test_core_api_object_cat() { return retVal; } +/*** + * Attempt to get a binary file over the api + */ +int test_core_api_object_cat_binary() { + int retVal = 0; + pthread_t daemon_thread1; + int thread_started1 = 0; + char* ipfs_path1 = "/tmp/ipfs_1"; + char* config_file1 = "config.test1.wo_journal"; + struct FSRepo* fs_repo = NULL; + char hash[256] = ""; + char* args[] = {"ipfs", "--config", ipfs_path1, "cat", hash }; + struct CliArguments* arguments = NULL; + + // logging + libp2p_logger_add_class("test_api"); + libp2p_logger_add_class("journal"); + libp2p_logger_add_class("daemon"); + libp2p_logger_add_class("online"); + libp2p_logger_add_class("peer"); + libp2p_logger_add_class("null"); + libp2p_logger_add_class("replication"); + libp2p_logger_add_class("fs_repo"); + libp2p_logger_add_class("lmdb_journalstore"); + libp2p_logger_add_class("lmdb_datastore"); + libp2p_logger_add_class("secio"); + libp2p_logger_add_class("socket"); + libp2p_logger_add_class("protocol"); + libp2p_logger_add_class("dht_protocol"); + libp2p_logger_add_class("resolver"); + libp2p_logger_add_class("unixfs"); + libp2p_logger_add_class("bitswap_engine"); + libp2p_logger_add_class("bitswap_network"); + libp2p_logger_add_class("exporter"); + libp2p_logger_add_class("api"); + + // build repo + if (!drop_build_open_repo(ipfs_path1, &fs_repo, config_file1)) { + ipfs_repo_fsrepo_free(fs_repo); + libp2p_logger_error("test_api", "Unable to drop and build repository at %s\n", ipfs_path1); + goto exit; + } + libp2p_logger_debug("test_api", "Changed 
the server id to %s.\n", fs_repo->config->identity->peer->id); + ipfs_repo_fsrepo_free(fs_repo); + + // add a file to the repo + uint8_t bytes[256]; + for(int i = 0; i < 256; i++) + bytes[i] = i; + char* filename = "test1.txt"; + create_file(filename, bytes, 256); + struct HashtableNode* node; + size_t bytes_written; + struct IpfsNode *local_node = NULL; + ipfs_node_offline_new(ipfs_path1, &local_node); + ipfs_import_file(NULL, filename, &node, local_node, &bytes_written, 0); + memset(hash, 0, 256); + ipfs_cid_hash_to_base58(node->hash, node->hash_size, (unsigned char*)hash, 256); + libp2p_logger_debug("test_api", "Inserted file with hash %s.\n", hash); + ipfs_node_free(local_node); + ipfs_hashtable_node_free(node); + + libp2p_logger_debug("test_api", "*** Firing up daemons ***\n"); + pthread_create(&daemon_thread1, NULL, test_daemon_start, (void*)ipfs_path1); + thread_started1 = 1; + sleep(3); + + // use a client to ask for the file + arguments = cli_arguments_new(5, args); + if (ipfs_exporter_object_cat(arguments, stdout) == 0) { + libp2p_logger_error("test_api", "ipfs_exporter_object_cat returned false.\n"); + goto exit; + } + + retVal = 1; + exit: + ipfs_daemon_stop(); + if (thread_started1) + pthread_join(daemon_thread1, NULL); + cli_arguments_free(arguments); + return retVal; +} + +/*** + * Attempt to get a large (>256K) binary file over the api + */ +int test_core_api_object_cat_large_binary() { + int retVal = 0; + pthread_t daemon_thread1; + int thread_started1 = 0; + char* ipfs_path1 = "/tmp/ipfs_1"; + char* config_file1 = "config.test1.wo_journal"; + struct FSRepo* fs_repo = NULL; + char hash[256] = ""; + char* args[] = {"ipfs", "--config", ipfs_path1, "cat", hash }; + struct CliArguments* arguments = NULL; + + // logging + libp2p_logger_add_class("test_api"); + libp2p_logger_add_class("journal"); + libp2p_logger_add_class("daemon"); + libp2p_logger_add_class("online"); + libp2p_logger_add_class("peer"); + libp2p_logger_add_class("null"); + libp2p_logger_add_class("replication"); + libp2p_logger_add_class("fs_repo"); + libp2p_logger_add_class("lmdb_journalstore"); + libp2p_logger_add_class("lmdb_datastore"); + libp2p_logger_add_class("secio"); + libp2p_logger_add_class("socket"); + libp2p_logger_add_class("protocol"); + libp2p_logger_add_class("dht_protocol"); + libp2p_logger_add_class("resolver"); + libp2p_logger_add_class("unixfs"); + libp2p_logger_add_class("bitswap_engine"); + libp2p_logger_add_class("bitswap_network"); + libp2p_logger_add_class("exporter"); + libp2p_logger_add_class("api"); + + // build repo + if (!drop_build_open_repo(ipfs_path1, &fs_repo, config_file1)) { + ipfs_repo_fsrepo_free(fs_repo); + libp2p_logger_error("test_api", "Unable to drop and build repository at %s\n", ipfs_path1); + goto exit; + } + libp2p_logger_debug("test_api", "Changed the server id to %s.\n", fs_repo->config->identity->peer->id); + ipfs_repo_fsrepo_free(fs_repo); + + // add a file to the repo + uint8_t bytes[300000]; + for(int i = 0; i < 300000; i++) + bytes[i] = i % 255; + char* filename = "test1.txt"; + create_file(filename, bytes, 300000); + struct HashtableNode* node; + size_t bytes_written; + struct IpfsNode *local_node = NULL; + ipfs_node_offline_new(ipfs_path1, &local_node); + ipfs_import_file(NULL, filename, &node, local_node, &bytes_written, 0); + memset(hash, 0, 256); + ipfs_cid_hash_to_base58(node->hash, node->hash_size, (unsigned char*)hash, 256); + libp2p_logger_debug("test_api", "Inserted file with hash %s.\n", hash); + ipfs_node_free(local_node); + 
ipfs_hashtable_node_free(node); + + libp2p_logger_debug("test_api", "*** Firing up daemons ***\n"); + pthread_create(&daemon_thread1, NULL, test_daemon_start, (void*)ipfs_path1); + thread_started1 = 1; + sleep(3); + + // use a client to ask for the file + arguments = cli_arguments_new(5, args); + char* filename2 = "test2.txt"; + unlink(filename2); + FILE* fd = fopen(filename2, "w" ); + if (ipfs_exporter_object_cat(arguments, fd) == 0) { + libp2p_logger_error("test_api", "ipfs_exporter_object_cat returned false.\n"); + fclose(fd); + goto exit; + } + fclose(fd); + + retVal = 1; + exit: + ipfs_daemon_stop(); + if (thread_started1) + pthread_join(daemon_thread1, NULL); + cli_arguments_free(arguments); + return retVal; +} + + int test_core_api_name_resolve() { int retVal = 0; pthread_t daemon_thread1; diff --git a/test/exchange/test_bitswap.h b/test/exchange/test_bitswap.h index 2c39c72..f32f4b0 100644 --- a/test/exchange/test_bitswap.h +++ b/test/exchange/test_bitswap.h @@ -504,3 +504,166 @@ int test_bitswap_retrieve_file_third_party() { } +/*** + * Attempt to retrieve a file from a known node + */ +int test_bitswap_retrieve_file_go_remote() { + int retVal = 0; + signal(SIGPIPE, SIG_IGN); + /*** + * This assumes a remote server with the hello_world.txt file already in its database + */ + int remote_port = 4001; + // mac + char* remote_peer_id = "QmNgGtj3Dk7RPRFgyp9exmq2WjaUf3gkwhMF1vkmcyXcBD"; + char* remote_ip = "10.211.55.2"; + // linux + //char* remote_peer_id = "QmRKm1d9kSCRpMFtLYpfhhCQ3DKuSSPJa3qn9wWXfwnWnY"; + //char* remote_ip = "10.211.55.4"; + char* hello_world_hash = "QmYAXgX8ARiriupMQsbGXtKdDyGzWry1YV3sycKw1qqmgH"; + + /* + libp2p_logger_add_class("dht_protocol"); + libp2p_logger_add_class("providerstore"); + libp2p_logger_add_class("peerstore"); + libp2p_logger_add_class("exporter"); + libp2p_logger_add_class("peer"); + */ + libp2p_logger_add_class("test_bitswap"); + libp2p_logger_add_class("null"); + libp2p_logger_add_class("online"); + libp2p_logger_add_class("multistream"); + libp2p_logger_add_class("secio"); + libp2p_logger_add_class("bitswap"); + libp2p_logger_add_class("bitswap_engine"); + libp2p_logger_add_class("bitswap_network"); + + char* ipfs_path = "/tmp/test1"; + char peer_ma_1[80] = "", *peer_id_2 = NULL; + struct IpfsNode* ipfs_node2 = NULL; + struct MultiAddress* ma_peer1 = NULL; + struct Libp2pVector* ma_vector2 = NULL; + struct Block* result = NULL; + struct Cid* cid = NULL; + + // create a MultiAddress of the GO peer (peer 1) + sprintf(peer_ma_1, "/ip4/%s/tcp/%d/ipfs/%s", remote_ip, remote_port, remote_peer_id); + ma_peer1 = multiaddress_new_from_string(peer_ma_1); + + // create my peer, peer 2 + libp2p_logger_debug("test_routing", "Firing up the client\n"); + ipfs_path = "/tmp/test2"; + ma_vector2 = libp2p_utils_vector_new(1); + libp2p_utils_vector_add(ma_vector2, ma_peer1); + drop_and_build_repository(ipfs_path, 4002, ma_vector2, &peer_id_2); + multiaddress_free(ma_peer1); + ipfs_node_online_new(ipfs_path, &ipfs_node2); + + if (!ipfs_cid_decode_hash_from_base58((unsigned char*)hello_world_hash, strlen(hello_world_hash), &cid)) + goto exit; + + sleep(3); + + // this does the heavy lifting... 
+ if (!ipfs_node2->exchange->GetBlock(ipfs_node2->exchange, cid, &result)) { + libp2p_logger_error("test_bitswap", "GetBlock returned false\n"); + goto exit; + } + + if (result == NULL) { + libp2p_logger_error("test_bitswap", "GetBlock returned NULL"); + goto exit; + } + + if (result->cid == NULL) { + libp2p_logger_error("test_bitswap", "GetBlock returned an object with no CID"); + goto exit; + } + + if (cid->hash_length != result->cid->hash_length) { + libp2p_logger_error("test_bitswap", "Node hash sizes do not match. Should be %lu but is %lu\n", strlen(hello_world_hash), result->cid->hash_length); + goto exit; + } + + retVal = 1; + exit: + if (peer_id_2 != NULL) + free(peer_id_2); + if (ma_vector2 != NULL) { + libp2p_utils_vector_free(ma_vector2); + } + if (cid != NULL) + ipfs_cid_free(cid); + ipfs_node_free(ipfs_node2); + if (ma_peer1 != NULL) + multiaddress_free(ma_peer1); + return retVal; +} + +/*** + * Attempt to retrieve a file from a known node + */ +int test_bitswap_serve_file_go_remote() { + int retVal = 0; + signal(SIGPIPE, SIG_IGN); + /*** + * This assumes a remote client asking for hello.txt + */ + // linux + //char* remote_peer_id = "QmRKm1d9kSCRpMFtLYpfhhCQ3DKuSSPJa3qn9wWXfwnWnY"; + //char* remote_ip = "10.211.55.4"; + //char* hello_world_hash = "QmYAXgX8ARiriupMQsbGXtKdDyGzWry1YV3sycKw1qqmgH"; + + /* + libp2p_logger_add_class("dht_protocol"); + libp2p_logger_add_class("providerstore"); + libp2p_logger_add_class("peerstore"); + libp2p_logger_add_class("exporter"); + libp2p_logger_add_class("peer"); + */ + libp2p_logger_add_class("test_bitswap"); + libp2p_logger_add_class("null"); + libp2p_logger_add_class("online"); + libp2p_logger_add_class("multistream"); + libp2p_logger_add_class("secio"); + libp2p_logger_add_class("bitswap"); + libp2p_logger_add_class("bitswap_engine"); + libp2p_logger_add_class("bitswap_network"); + + char* ipfs_path = "/tmp/ipfs_2"; + char *peer_id_2 = NULL; + struct FSRepo* fs_repo; + pthread_t thread2; + + // create my peer, peer 2 + libp2p_logger_debug("test_bitswap", "Firing up the client\n"); + drop_build_open_repo(ipfs_path, &fs_repo, "config.test2.wo_journal"); + ipfs_repo_fsrepo_free(fs_repo); + + // add file + char* bytes = "Hello, World!"; + char* filename = "hello.txt"; + create_file(filename, (unsigned char*)bytes, strlen(bytes)); + char* argv[] = { "ipfs" , "--config", ipfs_path, "add", filename}; + struct CliArguments args; + args.argc = 5; + args.argv = argv; + args.config_dir = ipfs_path; + args.verb_index = 3; + ipfs_import_files(&args); + + if (pthread_create(&thread2, NULL, test_daemon_start, (void*)ipfs_path) < 0) { + libp2p_logger_error("test_bitswap", "Unable to start thread 2\n"); + goto exit; + } + + sleep(120); + + retVal = 1; + exit: + ipfs_daemon_stop(); + if (peer_id_2 != NULL) + free(peer_id_2); + return retVal; +} + diff --git a/test/routing/test_routing.h b/test/routing/test_routing.h index 8381426..bf23227 100644 --- a/test/routing/test_routing.h +++ b/test/routing/test_routing.h @@ -30,7 +30,7 @@ int test_routing_put_value() { struct CliArguments* arguments = NULL; libp2p_logger_add_class("test_routing"); - + libp2p_logger_add_class("api"); // fire up the "publisher" if (!drop_and_build_repository(ipfs_path_publisher, 4001, NULL, &peer_id_publisher)) { libp2p_logger_error("test_routing", "Unable to drop and build repository.\n"); @@ -65,8 +65,10 @@ int test_routing_put_value() { // see if we have what we should... 
libp2p_logger_debug("test_routing", "About to ask for the server to resolve the publisher.\n"); - char* args3[] = {"ipfs", "--config", ipfs_path_publisher, "resolve", peer_id_publisher}; - arguments = cli_arguments_new(5, args3); + char ipns[126]; + sprintf(ipns, "/ipns/%s", peer_id_publisher); + char* args3[] = {"ipfs", "--config", ipfs_path_publisher, "name", "resolve", ipns}; + arguments = cli_arguments_new(6, args3); if (!ipfs_name(arguments)) goto exit; @@ -255,6 +257,8 @@ int test_routing_find_peer() { pthread_join(thread1, NULL); if (thread2_started) pthread_join(thread2, NULL); + if (thread3_started) + pthread_join(thread3, NULL); if (peer_id_1 != NULL) free(peer_id_1); if (peer_id_2 != NULL) @@ -513,6 +517,7 @@ int test_routing_retrieve_file_third_party() { char multiaddress_string[255] = ""; char hash[256] = ""; + /* libp2p_logger_add_class("online"); libp2p_logger_add_class("offline"); libp2p_logger_add_class("multistream"); @@ -525,6 +530,7 @@ int test_routing_retrieve_file_third_party() { libp2p_logger_add_class("test_routing"); libp2p_logger_add_class("api"); libp2p_logger_add_class("secio"); + */ // clean out repository diff --git a/test/scripts/Makefile b/test/scripts/Makefile new file mode 100644 index 0000000..13d0606 --- /dev/null +++ b/test/scripts/Makefile @@ -0,0 +1,13 @@ +CC = gcc + +%.o: %.c + $(CC) -c -o $@ $< + +generate_file: generate_file.o + $(CC) -o $@ $^ + +all: generate_file + +clean: + rm -f *.o + rm -f generate_file diff --git a/test/scripts/generate_file.c b/test/scripts/generate_file.c new file mode 100644 index 0000000..eb1bc5d --- /dev/null +++ b/test/scripts/generate_file.c @@ -0,0 +1,26 @@ +#include <stdio.h> +#include <stdlib.h> + +int main(int argc, char** argv) { + if (argc != 3) { + fprintf(stderr, "Syntax: %s <filename> <size>\n", argv[0]); + exit(1); + } + + char* filename = argv[1]; + long file_size = atoi(argv[2]); + + FILE* fd = fopen(filename, "w"); + if (!fd) { + fprintf(stderr, "Unable to open the file %s for writing.\n", filename); + exit(1); + } + + for(size_t i = 0; i < file_size; i++) { + char byte = i % 255; + fwrite(&byte, 1, 1, fd); + } + + fclose(fd); + return 0; +} diff --git a/test/scripts/run_test.sh b/test/scripts/run_test.sh index 79ea10b..7b0635a 100755 --- a/test/scripts/run_test.sh +++ b/test/scripts/run_test.sh @@ -1,10 +1,17 @@ #!/bin/bash -TEST_FILE=$1 +TEST_FILE=test_$1.sh + +test_success=0 source ./$TEST_FILE -pre -body -post +function do_it() { + pre + body + post + return $? +} +do_it +exit $test_success diff --git a/test/scripts/run_tests.sh b/test/scripts/run_tests.sh index 631ef69..20affc7 100755 --- a/test/scripts/run_tests.sh +++ b/test/scripts/run_tests.sh @@ -1,8 +1,29 @@ #!/bin/bash -./run_test.sh test_1.sh -./run_test.sh test_2.sh -./run_test.sh test_3.sh -./run_test.sh test_4.sh -./run_test.sh test_5.sh -./run_test.sh test_6.sh +#source ./test_helpers.sh + +rm testlog.txt + +num_tests=9 +tests_passed=0 +tests_failed=0 +i=1 +while [ $i -le $num_tests ]; do + echo *Running test number $i + echo *Running test number $i >> testlog.txt + ./run_test.sh $i >>testlog.txt 2>&1 + retVal=$? + if [ $retVal -ne 0 ]; then + echo *Test number $i failed + echo *Test number $i failed >> testlog.txt + let tests_failed++ + else + echo *Test number $i passed + echo *Test number $i passed >> testlog.txt + let tests_passed++ + fi + let i++ +done +echo Of $num_tests tests, $tests_passed passed and $tests_failed failed. +echo Of $num_tests tests, $tests_passed passed and $tests_failed failed.
>> testlog.txt +echo Results are in testlog.txt diff --git a/test/scripts/test_1.sh b/test/scripts/test_1.sh index 3acbd4f..bc0c373 100755 --- a/test/scripts/test_1.sh +++ b/test/scripts/test_1.sh @@ -1,15 +1,48 @@ #!/bin/bash +#### +# Attempt to retrieve large binary file from running daemon +# +#### + +source ./test_helpers.sh + +IPFS="../../main/ipfs --config /tmp/ipfs_1" + function pre { -echo 'in pre'; + rm -Rf /tmp/ipfs_1 + eval "$IPFS" init; + check_failure_with_exit "pre" $? + cp ../config.test1.wo_journal /tmp/ipfs_1/config } function post { -echo 'in post'; + rm -Rf /tmp/ipfs_1; + rm hello.bin; + rm hello2.bin; } function body { - echo 'in body'; + retVal=0 + create_binary_file 300000; + eval "$IPFS" add hello.bin + check_failure_with_exit "add hello.bin" $? + + #start the daemon + eval "../../main/ipfs --config /tmp/ipfs_1 daemon &" + daemon_id=$! + sleep 5 + + eval "$IPFS" cat QmQY3qveNvosAgRhcgVVgkPPLZv4fpWuxhL3pfihzgKtTf > hello2.bin + check_failure "cat" $? + + # file size should be 300000 + actualsize=$(wc -c < hello2.bin) + if [ $actualsize -ne 300000 ]; then + echo '*** Failure *** file size incorrect' + let retVal=1 + fi + + kill -9 $daemon_id + return $retVal } - - diff --git a/test/scripts/test_2.sh b/test/scripts/test_2.sh index cdd4033..2b27615 100755 --- a/test/scripts/test_2.sh +++ b/test/scripts/test_2.sh @@ -7,7 +7,7 @@ IPFS="../../main/ipfs --config /tmp/ipfs_1" function pre { post eval "$IPFS" init; - check_failure "pre" $? + check_failure_with_exit "pre" $? } function post { @@ -18,10 +18,10 @@ function post { function body { create_hello_world; eval "$IPFS" add hello.txt - check_failure "add hello.txt" $? + check_failure_with_exit "add hello.txt" $? eval "$IPFS" cat QmYAXgX8ARiriupMQsbGXtKdDyGzWry1YV3sycKw1qqmgH - check_failure "cat hello.txt" $? + check_failure_with_exit "cat hello.txt" $? } diff --git a/test/scripts/test_3.sh b/test/scripts/test_3.sh index 283ea48..c564139 100755 --- a/test/scripts/test_3.sh +++ b/test/scripts/test_3.sh @@ -12,7 +12,7 @@ IPFS="../../main/ipfs --config /tmp/ipfs_1" function pre { post eval "$IPFS" init; - check_failure "pre" $? + check_failure_with_exit "pre" $? } function post { @@ -23,7 +23,7 @@ function post { function body { create_hello_world; eval "$IPFS" add hello.txt - check_failure "add hello.txt" $? + check_failure_with_exit "add hello.txt" $? #start the daemon eval "../../main/ipfs --config /tmp/ipfs_1 daemon &" @@ -31,7 +31,7 @@ function body { sleep 5 eval "$IPFS" cat QmYAXgX8ARiriupMQsbGXtKdDyGzWry1YV3sycKw1qqmgH - check_failure "cat hello.txt" $? + check_failure_with_exit "cat hello.txt" $? kill -9 $daemon_id } diff --git a/test/scripts/test_4.sh b/test/scripts/test_4.sh index 3e93cfc..2bef821 100755 --- a/test/scripts/test_4.sh +++ b/test/scripts/test_4.sh @@ -12,7 +12,7 @@ IPFS="../../main/ipfs --config /tmp/ipfs_1" function pre { post eval "$IPFS" init; - check_failure "pre" $? + check_failure_with_exit "pre" $? cp ../config.test1.wo_journal /tmp/ipfs_1/config } @@ -24,7 +24,7 @@ function post { function body { create_hello_world; eval "$IPFS" add hello.txt - check_failure "add hello.txt" $? + check_failure_with_exit "add hello.txt" $? #start the daemon eval "../../main/ipfs --config /tmp/ipfs_1 daemon &" @@ -32,10 +32,10 @@ function body { sleep 5 eval "$IPFS" name publish QmYAXgX8ARiriupMQsbGXtKdDyGzWry1YV3sycKw1qqmgH - check_failure "name publish" $? + check_failure_with_exit "name publish" $? eval "$IPFS" name resolve /ipns/QmZVoAZGFfinB7MQQiDzB84kWaDPQ95GLuXdemJFM2r9b4 - check_failure "name resolve" $? 
+ check_failure_with_exit "name resolve" $? kill -9 $daemon_id } diff --git a/test/scripts/test_5.sh b/test/scripts/test_5.sh index 291d3f3..7a27d46 100755 --- a/test/scripts/test_5.sh +++ b/test/scripts/test_5.sh @@ -12,11 +12,11 @@ IPFS2="../../main/ipfs --config /tmp/ipfs_2" function pre { post eval "$IPFS1" init; - check_failure "pre" $? + check_failure_with_exit "pre" $? cp ../config.test1.wo_journal /tmp/ipfs_1/config eval "$IPFS2" init; - check_failure "pre ipfs2" $? + check_failure_with_exit "pre ipfs2" $? cp ../config.test2.wo_journal /tmp/ipfs_2/config } @@ -29,7 +29,7 @@ function post { function body { create_hello_world; eval "$IPFS1" add hello.txt - check_failure "add hello.txt" $? + check_failure_with_exit "add hello.txt" $? #start the daemons eval "../../main/ipfs --config /tmp/ipfs_1 daemon &" diff --git a/test/scripts/test_6.sh b/test/scripts/test_6.sh index cd2e06e..16115c3 100755 --- a/test/scripts/test_6.sh +++ b/test/scripts/test_6.sh @@ -12,7 +12,7 @@ IPFS="../../main/ipfs --config /tmp/ipfs_1" function pre { post eval "$IPFS" init; - check_failure "pre" $? + check_failure_with_exit "pre" $? cp ../config.test1.wo_journal /tmp/ipfs_1/config } @@ -24,7 +24,7 @@ function post { function body { create_hello_world; eval "$IPFS" add hello.txt - check_failure "add hello.txt" $? + check_failure_with_exit "add hello.txt" $? #start the daemon eval "../../main/ipfs --config /tmp/ipfs_1 daemon &" @@ -32,7 +32,7 @@ function body { sleep 5 eval "$IPFS" name publish QmYAXgX8ARiriupMQsbGXtKdDyGzWry1YV3sycKw1qqmgH - check_failure "name publish" $? + check_failure_with_exit "name publish" $? kill -9 $daemon_id } diff --git a/test/scripts/test_7.sh b/test/scripts/test_7.sh new file mode 100755 index 0000000..a066062 --- /dev/null +++ b/test/scripts/test_7.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +#### +# Attempt to add and retrieve binary file from running daemon +# +#### + +source ./test_helpers.sh + +IPFS="../../main/ipfs --config /tmp/ipfs_1" + +function pre { + rm -Rf /tmp/ipfs_1 + eval "$IPFS" init; + check_failure_with_exit "pre" $? + cp ../config.test1.wo_journal /tmp/ipfs_1/config +} + +function post { + rm -Rf /tmp/ipfs_1; + rm hello.bin; + rm hello2.bin; +} + +function body { + create_binary_file 256; + + #start the daemon + eval "../../main/ipfs --config /tmp/ipfs_1 daemon &" + daemon_id=$! + sleep 5 + + # add file + eval "$IPFS" add hello.bin + check_failure_with_exit "add hello.bin" $? + sleep 5 + + # retrieve file + eval "$IPFS" cat QmX4zpwaE7CSgZZsULgoB3gXYC6hh7RN19bEfWxw7sL8Xx > hello2.bin + check_failure_with_exit "cat" $? + + # file size should be 256 + actualsize=$(wc -c < hello2.bin) + if [ $actualsize -ne 256 ]; then + echo '*** Failure *** file size incorrect' + fi + + kill -9 $daemon_id +} diff --git a/test/scripts/test_8.sh b/test/scripts/test_8.sh new file mode 100755 index 0000000..a066062 --- /dev/null +++ b/test/scripts/test_8.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +#### +# Attempt to add and retrieve binary file from running daemon +# +#### + +source ./test_helpers.sh + +IPFS="../../main/ipfs --config /tmp/ipfs_1" + +function pre { + rm -Rf /tmp/ipfs_1 + eval "$IPFS" init; + check_failure_with_exit "pre" $? + cp ../config.test1.wo_journal /tmp/ipfs_1/config +} + +function post { + rm -Rf /tmp/ipfs_1; + rm hello.bin; + rm hello2.bin; +} + +function body { + create_binary_file 256; + + #start the daemon + eval "../../main/ipfs --config /tmp/ipfs_1 daemon &" + daemon_id=$! + sleep 5 + + # add file + eval "$IPFS" add hello.bin + check_failure_with_exit "add hello.bin" $? 
+ sleep 5 + + # retrieve file + eval "$IPFS" cat QmX4zpwaE7CSgZZsULgoB3gXYC6hh7RN19bEfWxw7sL8Xx > hello2.bin + check_failure_with_exit "cat" $? + + # file size should be 256 + actualsize=$(wc -c < hello2.bin) + if [ $actualsize -ne 256 ]; then + echo '*** Failure *** file size incorrect' + fi + + kill -9 $daemon_id +} diff --git a/test/scripts/test_9.sh b/test/scripts/test_9.sh new file mode 100755 index 0000000..de8e39a --- /dev/null +++ b/test/scripts/test_9.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +#### +# Attempt to retrieve binary file from running daemon +# +#### + +source ./test_helpers.sh + +IPFS="../../main/ipfs --config /tmp/ipfs_1" + +function pre { + rm -Rf /tmp/ipfs_1 + eval "$IPFS" init; + check_failure_with_exit "pre" $? + cp ../config.test1.wo_journal /tmp/ipfs_1/config +} + +function post { + rm -Rf /tmp/ipfs_1; + rm hello.bin; + rm hello2.bin; +} + +function body { + create_binary_file 256; + eval "$IPFS" add hello.bin + check_failure_with_exit "add hello.bin" $? + + #start the daemon + eval "../../main/ipfs --config /tmp/ipfs_1 daemon &" + daemon_id=$! + sleep 5 + + eval "$IPFS" cat QmX4zpwaE7CSgZZsULgoB3gXYC6hh7RN19bEfWxw7sL8Xx > hello2.bin + check_failure_with_exit "cat" $? + + # file size should be 256 + actualsize=$(wc -c < hello2.bin) + if [ $actualsize -ne 256 ]; then + echo '*** Failure *** file size incorrect' + fi + + kill -9 $daemon_id +} diff --git a/test/scripts/test_helpers.sh b/test/scripts/test_helpers.sh old mode 100644 new mode 100755 index 1fc699f..e1d10fd --- a/test/scripts/test_helpers.sh +++ b/test/scripts/test_helpers.sh @@ -1,5 +1,10 @@ #!/bin/bash +##### +# global to keep track of failures +##### +failure_count=0 + ##### # Functions to help with test scripts ##### @@ -11,6 +16,18 @@ function create_hello_world { echo 'Hello, World!' > hello.txt } +#### +# Create a binary file +### +function create_binary_file { + rm hello.bin + num_bytes=$1 + if [ $num_bytes -eq 0 ]; then + num_bytes=255; + fi + exec ./generate_file hello.bin $num_bytes +} + #### # Checks the return code and displays message if return code is not 0 # Param $1 name of function @@ -22,5 +39,20 @@ function check_failure() { if [ $RESULT -ne 0 ]; then echo "***Failure*** in $FUNC. The return value was $RESULT"; fi + return $RESULT } +#### +# Checks the return code and displays message if return code is not 0 +# Param $1 name of function +# Param $2 return code +#### +function check_failure_with_exit() { + FUNC=$1; + RESULT=$2; + if [ $RESULT -ne 0 ]; then + echo "***Failure*** in $FUNC. 
The return value was $RESULT"; + exit $RESULT + fi + return $RESULT +} diff --git a/test/testit.c b/test/testit.c index 11993f5..8b41e60 100644 --- a/test/testit.c +++ b/test/testit.c @@ -49,6 +49,7 @@ int testit(const char* name, int (*func)(void)) { return retVal == 0; } +<<<<<<< HEAD int add_test(const char* name, int (*func)(void), int part_of_suite) { // create a new test struct test* t = (struct test*) malloc(sizeof(struct test)); @@ -67,7 +68,83 @@ int add_test(const char* name, int (*func)(void), int part_of_suite) { last_test->next = t; } last_test = t; +======= +const char* names[] = { + "test_bitswap_new_free", + "test_bitswap_peer_request_queue_new", + "test_bitswap_retrieve_file", + "test_bitswap_retrieve_file_go_remote", + "test_bitswap_retrieve_file_known_remote", + "test_bitswap_retrieve_file_remote", + "test_bitswap_retrieve_file_third_party", + "test_bitswap_serve_file_go_remote", + "test_cid_new_free", + "test_cid_cast_multihash", + "test_cid_cast_non_multihash", + "test_cid_protobuf_encode_decode", + "test_core_api_startup_shutdown", + "test_core_api_object_cat", + "test_core_api_object_cat_binary", + "test_core_api_object_cat_large_binary", + "test_core_api_name_resolve", + "test_core_api_name_resolve_1", + "test_core_api_name_resolve_2", + "test_core_api_name_resolve_3", + "test_daemon_startup_shutdown", + "test_datastore_list_journal", + "test_journal_db", + "test_journal_encode_decode", + "test_journal_server_1", + "test_journal_server_2", + "test_repo_config_new", + "test_repo_config_init", + "test_repo_config_write", + "test_repo_config_identity_new", + "test_repo_config_identity_private_key", + "test_repo_fsrepo_write_read_block", + "test_repo_fsrepo_build", + "test_routing_supernode_start", + "test_get_init_command", + "test_import_small_file", + "test_import_large_file", + "test_repo_fsrepo_open_config", + "test_flatfs_get_directory", + "test_flatfs_get_filename", + "test_flatfs_get_full_filename", + "test_ds_key_from_binary", + "test_blocks_new", + "test_repo_bootstrap_peers_init", + "test_ipfs_datastore_put", + "test_node", + "test_node_link_encode_decode", + "test_node_encode_decode", + "test_node_peerstore", + "test_merkledag_add_data", + "test_merkledag_get_data", + "test_merkledag_add_node", + "test_merkledag_add_node_with_links", + // 50 below + "test_namesys_publisher_publish", + "test_namesys_resolver_resolve", + "test_resolver_get", + "test_routing_find_peer", + "test_routing_provide", + "test_routing_find_providers", + "test_routing_put_value", + "test_routing_supernode_get_value", + "test_routing_supernode_get_remote_value", + "test_routing_retrieve_file_third_party", + "test_routing_retrieve_large_file", + "test_unixfs_encode_decode", + "test_unixfs_encode_smallfile", + "test_ping", + "test_ping_remote", + "test_null_add_provider", + "test_resolver_remote_get" +}; +>>>>>>> branch 'master' of https://github.com/Agorise/c-ipfs +<<<<<<< HEAD if (last_test == NULL) return 0; return last_test->index; @@ -142,6 +219,81 @@ int build_test_collection() { add_test("test_null_add_provider", test_null_add_provider, 1); return add_test("test_resolver_remote_get", test_resolver_remote_get, 1); } +======= +int (*funcs[])(void) = { + test_bitswap_new_free, + test_bitswap_peer_request_queue_new, + test_bitswap_retrieve_file, + test_bitswap_retrieve_file_go_remote, + test_bitswap_retrieve_file_known_remote, + test_bitswap_retrieve_file_remote, + test_bitswap_retrieve_file_third_party, + test_bitswap_serve_file_go_remote, + test_cid_new_free, + 
test_cid_cast_multihash, + test_cid_cast_non_multihash, + test_cid_protobuf_encode_decode, + test_core_api_startup_shutdown, + test_core_api_object_cat, + test_core_api_object_cat_binary, + test_core_api_object_cat_large_binary, + test_core_api_name_resolve, + test_core_api_name_resolve_1, + test_core_api_name_resolve_2, + test_core_api_name_resolve_3, + test_daemon_startup_shutdown, + test_datastore_list_journal, + test_journal_db, + test_journal_encode_decode, + test_journal_server_1, + test_journal_server_2, + test_repo_config_new, + test_repo_config_init, + test_repo_config_write, + test_repo_config_identity_new, + test_repo_config_identity_private_key, + test_repo_fsrepo_write_read_block, + test_repo_fsrepo_build, + test_routing_supernode_start, + test_get_init_command, + test_import_small_file, + test_import_large_file, + test_repo_fsrepo_open_config, + test_flatfs_get_directory, + test_flatfs_get_filename, + test_flatfs_get_full_filename, + test_ds_key_from_binary, + test_blocks_new, + test_repo_bootstrap_peers_init, + test_ipfs_datastore_put, + test_node, + test_node_link_encode_decode, + test_node_encode_decode, + test_node_peerstore, + test_merkledag_add_data, + test_merkledag_get_data, + test_merkledag_add_node, + test_merkledag_add_node_with_links, + // 50 below + test_namesys_publisher_publish, + test_namesys_resolver_resolve, + test_resolver_get, + test_routing_find_peer, + test_routing_provide, + test_routing_find_providers, + test_routing_put_value, + test_routing_supernode_get_value, + test_routing_supernode_get_remote_value, + test_routing_retrieve_file_third_party, + test_routing_retrieve_large_file, + test_unixfs_encode_decode, + test_unixfs_encode_smallfile, + test_ping, + test_ping_remote, + test_null_add_provider, + test_resolver_remote_get +}; +>>>>>>> branch 'master' of https://github.com/Agorise/c-ipfs /** * Pull the next test name from the command line diff --git a/unixfs/unixfs.c b/unixfs/unixfs.c index e0d9a31..9ea4e78 100644 --- a/unixfs/unixfs.c +++ b/unixfs/unixfs.c @@ -139,11 +139,15 @@ int ipfs_unixfs_add_data(unsigned char* data, size_t data_length, struct UnixFS* } // debug: display hash - size_t b58size = 100; - uint8_t *b58key = (uint8_t *) malloc(b58size); - libp2p_crypto_encoding_base58_encode(unix_fs->hash, unix_fs->hash_length, &b58key, &b58size); - libp2p_logger_debug("unixfs", "Saving hash of %s to unixfs object.\n", b58key); - free(b58key); + if (libp2p_logger_watching_class("unixfs")) { + size_t b58size = 100; + uint8_t *b58key = (uint8_t *) malloc(b58size); + if (b58key != NULL) { + libp2p_crypto_encoding_base58_encode(unix_fs->hash, unix_fs->hash_length, &b58key, &b58size); + libp2p_logger_debug("unixfs", "Saving hash of %s to unixfs object.\n", b58key); + free(b58key); + } + } return 1; @@ -155,6 +159,9 @@ int ipfs_unixfs_add_blocksize(const struct UnixFSBlockSizeNode* blocksize, struc if (last == NULL) { // we're the first one unix_fs->block_size_head = (struct UnixFSBlockSizeNode*)malloc(sizeof(struct UnixFSBlockSizeNode)); + if (unix_fs->block_size_head == NULL) { + return 0; + } unix_fs->block_size_head->block_size = blocksize->block_size; unix_fs->block_size_head->next = NULL; } else { @@ -163,6 +170,8 @@ int ipfs_unixfs_add_blocksize(const struct UnixFSBlockSizeNode* blocksize, struc last = last->next; } last->next = (struct UnixFSBlockSizeNode*)malloc(sizeof(struct UnixFSBlockSizeNode)); + if (last->next == NULL) + return 0; last->next->block_size = blocksize->block_size; last->next->next = NULL; }
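
Note on the test/testit.c hunk above: the side of the conflict labelled "branch 'master'" replaces the linked-list add_test() registration with two parallel arrays, names[] and funcs[], that are expected to pair up by index. The sketch below is an illustration only; it is not part of this patch, and run_registry is an invented name. It relies on the convention visible throughout this test suite that each test function returns 1 on success and 0 on failure.

/* Illustration only, not part of this patch: driving a parallel
 * names[]/funcs[] test registry. Assumes both arrays have the same
 * length and the same order, and that each test returns 1 on success. */
#include <stdio.h>

static int run_registry(const char* names[], int (*funcs[])(void), size_t count) {
	size_t passed = 0;
	for (size_t i = 0; i < count; i++) {
		printf("Running %s\n", names[i]);
		if (funcs[i]() != 0)
			passed++;
		else
			printf("***Failure*** in %s\n", names[i]);
	}
	printf("Of %zu tests, %zu passed and %zu failed.\n", count, passed, count - passed);
	return passed == count;
}

With the arrays from testit.c in scope, a call such as run_registry(names, funcs, sizeof(funcs)/sizeof(funcs[0])) would exercise every registered test once.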
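
The shell tests above (test_1.sh, test_7.sh, test_8.sh, test_9.sh) verify a retrieved binary file only by its byte count (wc -c). A small standalone checker, sketched below, could additionally confirm the i % 255 byte pattern written by test/scripts/generate_file.c, which would catch chunk-boundary corruption that a pure size check misses. The program and its name verify_pattern are illustrative, not part of this patch.

/* Illustration only, not part of this patch: verify that a file produced by
 * "ipfs cat" matches the i % 255 pattern written by test/scripts/generate_file.c
 * and has the expected length. */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char** argv) {
	if (argc != 3) {
		fprintf(stderr, "Syntax: %s <filename> <expected_size>\n", argv[0]);
		return 1;
	}
	FILE* fp = fopen(argv[1], "rb");
	if (fp == NULL) {
		fprintf(stderr, "Unable to open %s for reading.\n", argv[1]);
		return 1;
	}
	long expected_size = atol(argv[2]);
	long i = 0;
	int c;
	while ((c = fgetc(fp)) != EOF) {
		if (c != (int)(i % 255)) {
			fprintf(stderr, "Byte %ld mismatch: expected %ld, got %d.\n", i, i % 255, c);
			fclose(fp);
			return 1;
		}
		i++;
	}
	fclose(fp);
	if (i != expected_size) {
		fprintf(stderr, "Size mismatch: expected %ld, got %ld.\n", expected_size, i);
		return 1;
	}
	return 0;
}

Compiled to, say, verify_pattern, it could be invoked from a test body after the cat step, e.g. ./verify_pattern hello2.bin 300000 in test_1.sh.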