Better handling of bad memory allocation
Commit 71c216defb (parent 996687cfce)
25 changed files with 299 additions and 117 deletions
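All of the changes below apply the same pattern: check the result of every malloc() before using it, release anything that was partially built, and report the failure to the caller. A minimal sketch of that pattern in C, using a hypothetical copy_bytes() helper rather than any function from this commit:

    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical helper showing the shape of the fix applied throughout:
     * verify the allocation, clean up on failure, and signal the error. */
    static unsigned char* copy_bytes(const unsigned char* src, size_t len) {
        unsigned char* buf = (unsigned char*) malloc(len);
        if (buf == NULL) {
            // out of memory: report failure instead of dereferencing NULL
            return NULL;
        }
        memcpy(buf, src, len);
        return buf;
    }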
@@ -175,9 +175,19 @@ int ipfs_block_free(struct Block* block) {
  */
 struct Block* ipfs_block_copy(struct Block* original) {
     struct Block* copy = ipfs_block_new();
-    copy->data_length = original->data_length;
-    copy->data = (unsigned char*) malloc(original->data_length);
-    memcpy(copy->data, original->data, original->data_length);
-    copy->cid = ipfs_cid_copy(original->cid);
+    if (copy != NULL) {
+        copy->data_length = original->data_length;
+        copy->data = (unsigned char*) malloc(original->data_length);
+        if (copy->data == NULL) {
+            ipfs_block_free(copy);
+            return NULL;
+        }
+        memcpy(copy->data, original->data, original->data_length);
+        copy->cid = ipfs_cid_copy(original->cid);
+        if (copy->cid == NULL) {
+            ipfs_block_free(copy);
+            return NULL;
+        }
+    }
     return copy;
 }

@@ -102,6 +102,8 @@ char* ipfs_blockstore_path_get(const struct FSRepo* fs_repo, const char* filenam
     }
     int complete_filename_size = strlen(filepath) + strlen(filename) + 2;
     char* complete_filename = (char*)malloc(complete_filename_size);
+    if (complete_filename == NULL)
+        return NULL;
     retVal = os_utils_filepath_join(filepath, filename, complete_filename, complete_filename_size);
     return complete_filename;
 }

@@ -147,6 +147,10 @@ struct Cid* ipfs_cid_copy(const struct Cid* original) {
         copy->version = original->version;
         copy->hash_length = original->hash_length;
         copy->hash = (unsigned char*) malloc(original->hash_length);
+        if (copy->hash == NULL) {
+            ipfs_cid_free(copy);
+            return NULL;
+        }
         memcpy(copy->hash, original->hash, original->hash_length);
     }
     return copy;

@@ -126,6 +126,10 @@ int ipfs_core_http_process_name(struct IpfsNode* local_node, struct HttpRequest*
         struct HttpResponse* res = *response;
         res->content_type = "application/json";
         res->bytes = (uint8_t*) malloc(strlen(local_node->identity->peer->id) + strlen(path) + 30);
+        if (res->bytes == NULL) {
+            free(result);
+            return 0;
+        }
         sprintf((char*)res->bytes, "{ \"Path\": \"%s\" }", result);
         res->bytes_size = strlen((char*)res->bytes);
     }

@@ -137,6 +141,8 @@ int ipfs_core_http_process_name(struct IpfsNode* local_node, struct HttpRequest*
         struct HttpResponse* res = *response;
         res->content_type = "application/json";
         res->bytes = (uint8_t*) malloc(strlen(local_node->identity->peer->id) + strlen(path) + 30);
+        if (res->bytes == NULL)
+            return 0;
         sprintf((char*)res->bytes, "{ \"Name\": \"%s\"\n \"Value\": \"%s\" }", local_node->identity->peer->id, path);
         res->bytes_size = strlen((char*)res->bytes);
     }

@@ -194,40 +200,44 @@ int ipfs_core_http_process_dht_provide(struct IpfsNode* local_node, struct HttpR
         struct HttpResponse* res = *response;
         res->content_type = "application/json";
         res->bytes = (uint8_t*) malloc(1024);
-        if (!failedCount) {
-            // complete success
-            // TODO: do the right thing
-            snprintf((char*)res->bytes, 1024, "{\n\t\"ID\": \"<string>\"\n" \
-                    "\t\"Type\": \"<int>\"\n"
-                    "\t\"Responses\": [\n"
-                    "\t\t{\n"
-                    "\t\t\t\"ID\": \"<string>\"\n"
-                    "\t\t\t\"Addrs\": [\n"
-                    "\t\t\t\t\"<object>\"\n"
-                    "\t\t\t]\n"
-                    "\t\t}\n"
-                    "\t]\n"
-                    "\t\"Extra\": \"<string>\"\n"
-                    "}\n"
-                    );
+        if (res->bytes == NULL) {
+            res->bytes_size = 0;
         } else {
-            // at least some failed
-            // TODO: do the right thing
-            snprintf((char*)res->bytes, 1024, "{\n\t\"ID\": \"<string>\",\n" \
-                    "\t\"Type\": \"<int>\",\n"
-                    "\t\"Responses\": [\n"
-                    "\t\t{\n"
-                    "\t\t\t\"ID\": \"<string>\",\n"
-                    "\t\t\t\"Addrs\": [\n"
-                    "\t\t\t\t\"<object>\"\n"
-                    "\t\t\t]\n"
-                    "\t\t}\n"
-                    "\t],\n"
-                    "\t\"Extra\": \"<string>\"\n"
-                    "}\n"
-                    );
+            if (!failedCount) {
+                // complete success
+                // TODO: do the right thing
+                snprintf((char*)res->bytes, 1024, "{\n\t\"ID\": \"<string>\"\n" \
+                        "\t\"Type\": \"<int>\"\n"
+                        "\t\"Responses\": [\n"
+                        "\t\t{\n"
+                        "\t\t\t\"ID\": \"<string>\"\n"
+                        "\t\t\t\"Addrs\": [\n"
+                        "\t\t\t\t\"<object>\"\n"
+                        "\t\t\t]\n"
+                        "\t\t}\n"
+                        "\t]\n"
+                        "\t\"Extra\": \"<string>\"\n"
+                        "}\n"
+                        );
+            } else {
+                // at least some failed
+                // TODO: do the right thing
+                snprintf((char*)res->bytes, 1024, "{\n\t\"ID\": \"<string>\",\n" \
+                        "\t\"Type\": \"<int>\",\n"
+                        "\t\"Responses\": [\n"
+                        "\t\t{\n"
+                        "\t\t\t\"ID\": \"<string>\",\n"
+                        "\t\t\t\"Addrs\": [\n"
+                        "\t\t\t\t\"<object>\"\n"
+                        "\t\t\t]\n"
+                        "\t\t}\n"
+                        "\t],\n"
+                        "\t\"Extra\": \"<string>\"\n"
+                        "}\n"
+                        );
+            }
+            res->bytes_size = strlen((char*)res->bytes);
         }
-        res->bytes_size = strlen((char*)res->bytes);
         return failedCount < request->arguments->total;
     }

@@ -322,7 +332,9 @@ char* ipfs_core_http_request_build_url_start(struct IpfsNode* local_node) {
     sprintf(port, "%d", portInt);
     int len = 18 + strlen(host) + strlen(port);
     char* retVal = malloc(len);
-    sprintf(retVal, "http://%s:%s/api/v0", host, port);
+    if (retVal != NULL) {
+        sprintf(retVal, "http://%s:%s/api/v0", host, port);
+    }
     free(host);
     multiaddress_free(ma);
     return retVal;

@@ -338,18 +350,20 @@ int ipfs_core_http_request_add_commands(struct HttpRequest* request, char** url)
     // command
     int addl_length = strlen(request->command) + 2;
     char* string1 = (char*) malloc(strlen(*url) + addl_length);
-    sprintf(string1, "%s/%s", *url, request->command);
-    free(*url);
-    *url = string1;
-    // sub_command
-    if (request->sub_command != NULL) {
-        addl_length = strlen(request->sub_command) + 2;
-        string1 = (char*) malloc(strlen(*url) + addl_length);
-        sprintf(string1, "%s/%s", *url, request->sub_command);
+    if (string1 != NULL) {
+        sprintf(string1, "%s/%s", *url, request->command);
+        free(*url);
+        *url = string1;
+        // sub_command
+        if (request->sub_command != NULL) {
+            addl_length = strlen(request->sub_command) + 2;
+            string1 = (char*) malloc(strlen(*url) + addl_length);
+            sprintf(string1, "%s/%s", *url, request->sub_command);
             free(*url);
             *url = string1;
         }
+    }
-    return 1;
+    return string1 != NULL;
 }

 /***

@@ -174,6 +174,11 @@ void* ipfs_null_listen (void *ptr)
         connection_param->local_node = listen_param->local_node;
         connection_param->port = listen_param->port;
         connection_param->ip = malloc(INET_ADDRSTRLEN);
+        if (connection_param->ip == NULL) {
+            // we are out of memory
+            free(connection_param);
+            continue;
+        }
         if (inet_ntop(AF_INET, &(listen_param->ipv4), connection_param->ip, INET_ADDRSTRLEN) == NULL) {
             free(connection_param->ip);
             connection_param->ip = NULL;

@@ -66,6 +66,10 @@ int ipfs_ping (int argc, char **argv)
     // perhaps they passed an IP and port
     if (argc >= 3) {
         char* str = malloc(strlen(argv[2]) + strlen(argv[3]) + 100);
+        if (str == NULL) {
+            // memory issue
+            goto exit;
+        }
         sprintf(str, "/ip4/%s/tcp/%s", argv[2], argv[3]);
         peer_to_ping = libp2p_peer_new();
         if (peer_to_ping) {

@@ -519,6 +519,10 @@ int ipfs_bitswap_message_protobuf_encode(const struct BitswapMessage* message, u
         // protobuf it
         size_t temp_size = ipfs_blocks_block_protobuf_encode_size(entry);
         uint8_t* temp = (uint8_t*) malloc(temp_size);
+        if (temp == NULL) {
+            // memory issues
+            return 0;
+        }
         if (!ipfs_blocks_block_protobuf_encode(entry, temp, temp_size, &temp_size)) {
             free(temp);
             return 0;

@@ -536,6 +540,9 @@ int ipfs_bitswap_message_protobuf_encode(const struct BitswapMessage* message, u
     if (message->wantlist != NULL) {
         size_t temp_size = ipfs_bitswap_wantlist_protobuf_encode_size(message->wantlist);
         uint8_t* temp = (uint8_t*) malloc(temp_size);
+        if (temp == NULL) {
+            return 0;
+        }
         if (!ipfs_bitswap_wantlist_protobuf_encode(message->wantlist, temp, temp_size, &temp_size)) {
             free(temp);
             return 0;

@@ -665,6 +672,9 @@ int ipfs_bitswap_message_add_wantlist_items(struct BitswapMessage* message, stru
         struct WantlistEntry* entry = ipfs_bitswap_wantlist_entry_new();
         entry->block_size = ipfs_cid_protobuf_encode_size(cidEntry->cid);
         entry->block = (unsigned char*) malloc(entry->block_size);
+        if (entry->block == NULL) {
+            return 0;
+        }
         if (!ipfs_cid_protobuf_encode(cidEntry->cid, entry->block, entry->block_size, &entry->block_size)) {
             // TODO: we should do more than return a half-baked list
             return 0;

@@ -36,16 +36,20 @@ int ipfs_exporter_get_node(struct IpfsNode* local_node, const unsigned char* has
         goto exit;
     }

-    libp2p_logger_debug("exporter", "get_node got a value. Converting it to a HashtableNode\n");
     // unprotobuf
     if (!ipfs_hashtable_node_protobuf_decode(buffer, buffer_size, result)) {
-        libp2p_logger_debug("exporter", "Conversion to HashtableNode not successful\n");
+        libp2p_logger_error("exporter", "Conversion to HashtableNode not successful\n");
         goto exit;
     }

     // copy in the hash
     (*result)->hash_size = hash_size;
     (*result)->hash = malloc(hash_size);
+    if ( (*result)->hash == NULL) {
+        // memory issue
+        libp2p_logger_error("exporter", "get_node: Unable to allocate memory.\n");
+        goto exit;
+    }
     memcpy((*result)->hash, hash, hash_size);

     retVal = 1;

@@ -220,6 +220,12 @@ int ipfs_import_file(const char* root_dir, const char* fileName, struct Hashtabl
     } else {
         free(path);
         path = malloc(strlen(root_dir) + strlen(file) + 2);
+        if (path == NULL) {
+            // memory issue
+            if (file != NULL)
+                free(file);
+            return 0;
+        }
         os_utils_filepath_join(root_dir, file, path, strlen(root_dir) + strlen(file) + 2);
         new_root_dir = path;
     }

@@ -328,6 +334,9 @@ struct FileList* ipfs_import_get_filelist(struct CliArguments* args) {
             continue;
         }
         struct FileList* current = (struct FileList*)malloc(sizeof(struct FileList));
+        if (current == NULL) {
+            return NULL;
+        }
         current->next = NULL;
         current->file_name = args->argv[i];
         // now wire it in

@@ -28,9 +28,16 @@ int ipfs_resolver_next_path(const char* path, char** next_part) {
     char* pos = strchr(&path[i+1], '/');
     if (pos == NULL) {
         *next_part = (char*)malloc(strlen(path) + 1);
+        if ( *next_part == NULL) {
+            // memory issue
+            return 0;
+        }
         strcpy(*next_part, path);
     } else {
         *next_part = (char*)malloc(pos - &path[i] + 1);
+        if (*next_part == NULL) {
+            return 0;
+        }
         strncpy(*next_part, &path[i], pos-&path[i]);
         (*next_part)[pos-&path[i]] = 0;
     }

@@ -145,9 +152,11 @@ struct HashtableNode* ipfs_resolver_remote_get(const char* path, struct Hashtabl
     message->key_size = strlen(key);
     size_t b58size = 100;
     uint8_t *b58key = (uint8_t *) malloc(b58size);
-    libp2p_crypto_encoding_base58_encode((unsigned char*)message->key, message->key_size, (unsigned char**) &b58key, &b58size);
-    libp2p_logger_debug("resolver", "Attempting to use kademlia to get key %s.\n", b58key);
-    free(b58key);
+    if (b58key == NULL) {
+        libp2p_crypto_encoding_base58_encode((unsigned char*)message->key, message->key_size, (unsigned char**) &b58key, &b58size);
+        libp2p_logger_debug("resolver", "Attempting to use kademlia to get key %s.\n", b58key);
+        free(b58key);
+    }
     size_t message_protobuf_size = libp2p_message_protobuf_encode_size(message);
     unsigned char message_protobuf[message_protobuf_size];
     libp2p_message_protobuf_encode(message, message_protobuf, message_protobuf_size, &message_protobuf_size);

@@ -41,6 +41,9 @@ void stripit(int argc, char** argv) {
     char* old_arg = argv[argc];
     int full_length = strlen(old_arg);
     char *tmp = (char*) malloc(full_length + 1);
+    if (tmp == NULL) {
+        return;
+    }
     char* ptr1 = &old_arg[1];
     strcpy(tmp, ptr1);
     tmp[strlen(tmp)-1] = 0;

@@ -29,6 +29,7 @@ enum WireType ipfs_node_link_message_fields[] = { WIRETYPE_LENGTH_DELIMITED, WIR
  * @Param name: The name of the link (char *)
  * @Param size: Size of the link (size_t)
  * @Param ahash: An Qmhash
+ * @returns true(1) on success, false(0) otherwise
  */
 int ipfs_node_link_create(char * name, unsigned char * ahash, size_t hash_size, struct NodeLink** node_link)
 {

@@ -40,12 +41,18 @@ int ipfs_node_link_create(char * name, unsigned char * ahash, size_t hash_size,
     // hash
     link->hash_size = hash_size;
     link->hash = (unsigned char*)malloc(hash_size);
+    if (link->hash == NULL) {
+        ipfs_node_link_free(link);
+        *node_link = NULL;
+        return 0;
+    }
     memcpy(link->hash, ahash, hash_size);
     // name
     if (name != NULL && strlen(name) > 0) {
         link->name = malloc(strlen(name) + 1);
         if ( link->name == NULL) {
-            free(link);
+            ipfs_node_link_free(link);
+            *node_link = NULL;
             return 0;
         }
         strcpy(link->name, name);

@@ -192,6 +199,9 @@ int ipfs_node_link_protobuf_decode(unsigned char* buffer, size_t buffer_length,
     }
     link->hash_size = hash_size - 2;
     link->hash = (unsigned char*)malloc(link->hash_size);
+    if (link->hash == NULL) {
+        goto exit;
+    }
     memcpy((char*)link->hash, (char*)&hash[2], link->hash_size);
     free(hash);
     pos += bytes_read;

@@ -729,13 +739,18 @@ int Node_Resolve(char ** result, char * input1)
     char * tr;
     char * end;
     tr=strtok_r(input,"/",&end);
+    int retVal = 1;
     for(int i = 0;tr;i++)
     {
         result[i] = (char *) malloc(strlen(tr)+1);
-        strcpy(result[i], tr);
+        if (result[i] != NULL) {
+            strcpy(result[i], tr);
+        } else {
+            retVal = 0;
+        }
         tr=strtok_r(NULL,"/",&end);
     }
-    return 1;
+    return retVal;
 }

 /*Node_Resolve_Links

@@ -751,6 +766,9 @@ struct Link_Proc * Node_Resolve_Links(struct HashtableNode * N, char * path)
     }
     int expected_link_ammount = Node_Resolve_Max_Size(path);
     struct Link_Proc * LProc = (struct Link_Proc *) malloc(sizeof(struct Link_Proc) + sizeof(struct NodeLink) * expected_link_ammount);
+    if (LProc == NULL) {
+        return NULL;
+    }
     LProc->ammount = 0;
     char * linknames[expected_link_ammount];
     Node_Resolve(linknames, path);

@@ -761,8 +779,10 @@ struct Link_Proc * Node_Resolve_Links(struct HashtableNode * N, char * path)
         if(proclink)
         {
             LProc->links[i] = (struct NodeLink *)malloc(sizeof(struct NodeLink));
-            memcpy(LProc->links[i], proclink, sizeof(struct NodeLink));
-            LProc->ammount++;
+            if (LProc->links[i] == NULL) { // TODO: What should we do if memory wasn't allocated here?
+                memcpy(LProc->links[i], proclink, sizeof(struct NodeLink));
+                LProc->ammount++;
+            }
             free(proclink);
         }
     }

@@ -41,6 +41,10 @@ int ipfs_namesys_resolver_resolve_once(struct IpfsNode* local_node, const char*
     if (local_node->repo->config->datastore->datastore_get(cid->hash, cid->hash_length, &record, local_node->repo->config->datastore)) {
         // we are able to handle this locally... return the results
         *results = (char*) malloc(record->value_size + 1);
+        if (*results == NULL) {
+            ipfs_cid_free(cid);
+            return 0;
+        }
         memset(*results, 0, record->value_size + 1);
         memcpy(*results, record->value, record->value_size);
         ipfs_cid_free(cid);

@@ -64,6 +68,9 @@ int ipfs_namesys_resolver_resolve_once(struct IpfsNode* local_node, const char*
 int ipfs_namesys_resolver_resolve(struct IpfsNode* local_node, const char* path, int recursive, char** results) {
     char* result = NULL;
     char* current_path = (char*) malloc(strlen(path) + 1);
+    if (current_path == NULL) {
+        return 0;
+    }
     strcpy(current_path, path);

     // if we go more than 10 deep, bail

@@ -84,7 +91,8 @@ int ipfs_namesys_resolver_resolve(struct IpfsNode* local_node, const char* path,
         // result will not be NULL
         free(current_path);
         current_path = (char*) malloc(strlen(result)+1);
-        strcpy(current_path, result);
+        if (current_path != NULL)
+            strcpy(current_path, result);
         free(result);
         counter++;
     } while(recursive && is_ipns_string(current_path));

@@ -131,6 +131,8 @@ int ipfs_namesys_hex_string_to_bytes(const unsigned char* hex, unsigned char** b
     // allocate memory
     *buffer = (unsigned char*)malloc( hex_size / 2 );
     unsigned char* ptr = *buffer;
+    if (ptr == NULL)
+        return ErrAllocFailed;

     // convert string
     for(size_t i = 0; i < hex_size; i++) {

@@ -7,8 +7,10 @@
 char* alloc_and_copy(char* source) {
     unsigned long strLen = strlen(source);
     char* result = malloc(sizeof(char) * (strLen + 1));
-    strncpy(result, source, strLen);
-    result[strLen] = 0;
+    if (result != NULL) {
+        strncpy(result, source, strLen);
+        result[strLen] = 0;
+    }
     return result;
 }

@@ -140,24 +140,34 @@ int ipfs_repo_config_init(struct RepoConfig* config, unsigned int num_bits_for_k

     // swarm addresses
     char* addr1 = malloc(64);
-    sprintf(addr1, "/ip4/0.0.0.0/tcp/%d", swarm_port);
-    config->addresses->swarm_head = libp2p_utils_linked_list_new();
-    config->addresses->swarm_head->item = malloc(strlen(addr1) + 1);
-    strcpy(config->addresses->swarm_head->item, addr1);
+    if (addr1 != NULL) {
+        sprintf(addr1, "/ip4/0.0.0.0/tcp/%d", swarm_port);
+        config->addresses->swarm_head = libp2p_utils_linked_list_new();
+        if (config->addresses->swarm_head != NULL) {
+            config->addresses->swarm_head->item = malloc(strlen(addr1) + 1);
+            if (config->addresses->swarm_head->item != NULL) {
+                strcpy(config->addresses->swarm_head->item, addr1);
+            }

-    sprintf(addr1, "/ip6/::/tcp/%d", swarm_port);
-    config->addresses->swarm_head->next = libp2p_utils_linked_list_new();
-    config->addresses->swarm_head->next->item = malloc(strlen(addr1) + 1);
-    strcpy(config->addresses->swarm_head->next->item, addr1);
-
-    int port_adder = swarm_port - 4001;
-    sprintf(addr1, "/ip4/127.0.0.1/tcp/%d", 5001 + port_adder);
-    config->addresses->api = malloc(strlen(addr1)+1);
-    strcpy(config->addresses->api, addr1);
-    sprintf(addr1, "/ip4/127.0.0.1/tcp/%d", 8080 + port_adder);
-    config->addresses->gateway = malloc(strlen(addr1)+1);
-    strcpy(config->addresses->gateway, addr1);
-    free(addr1);
+            sprintf(addr1, "/ip6/::/tcp/%d", swarm_port);
+            config->addresses->swarm_head->next = libp2p_utils_linked_list_new();
+            if (config->addresses->swarm_head->next != NULL) {
+                config->addresses->swarm_head->next->item = malloc(strlen(addr1) + 1);
+                if (config->addresses->swarm_head->next->item != NULL)
+                    strcpy(config->addresses->swarm_head->next->item, addr1);
+            }
+        }
+        int port_adder = swarm_port - 4001;
+        sprintf(addr1, "/ip4/127.0.0.1/tcp/%d", 5001 + port_adder);
+        config->addresses->api = malloc(strlen(addr1)+1);
+        if (config->addresses->api != NULL)
+            strcpy(config->addresses->api, addr1);
+        sprintf(addr1, "/ip4/127.0.0.1/tcp/%d", 8080 + port_adder);
+        config->addresses->gateway = malloc(strlen(addr1)+1);
+        if (config->addresses->gateway != NULL)
+            strcpy(config->addresses->gateway, addr1);
+        free(addr1);
+    }

     config->discovery.mdns.enabled = 1;
     config->discovery.mdns.interval = 10;

@@ -6,8 +6,10 @@

 char* alloc_and_fill(char* source) {
     char* newString = malloc(sizeof(char) * (strlen(source) + 1));
-    strncpy(newString, source, strlen(source));
-    newString[strlen(source)] = 0;
+    if (newString != NULL) {
+        strncpy(newString, source, strlen(source));
+        newString[strlen(source)] = 0;
+    }
     return newString;
 }

@@ -150,7 +150,8 @@ int ipfs_repo_fsrepo_new(const char* repo_path, struct RepoConfig* config, struc
     } else {
         int len = strlen(repo_path) + 1;
         (*repo)->path = (char*)malloc(len);
-        strncpy((*repo)->path, repo_path, len);
+        if ( (*repo)->path != NULL)
+            strncpy((*repo)->path, repo_path, len);
     }
     // allocate other structures
     if (config != NULL)

@@ -574,10 +575,13 @@ int ipfs_repo_fsrepo_node_get(const unsigned char* hash, size_t hash_length, voi
     if (retVal == 1) {
         *node_size = ipfs_hashtable_node_protobuf_encode_size(node);
         *node_obj = malloc(*node_size);
+        if (*node_obj == NULL) {
+            ipfs_hashtable_node_free(node);
+            return 0;
+        }
         retVal = ipfs_hashtable_node_protobuf_encode(node, *node_obj, *node_size, node_size);
     }
-    if (node != NULL)
-        ipfs_hashtable_node_free(node);
+    ipfs_hashtable_node_free(node);
     return retVal;
 }

@@ -220,6 +220,10 @@ int repo_fsrepo_lmdb_put(struct DatastoreRecord* datastore_record, const struct
         journalstore_record = lmdb_journal_record_new();
         journalstore_record->hash_size = datastore_record->key_size;
         journalstore_record->hash = malloc(datastore_record->key_size);
+        if (journalstore_record->hash == NULL) {
+            libp2p_logger_error("lmdb_datastore", "put: Unable to allocate memory for key.\n");
+            return 0;
+        }
         memcpy(journalstore_record->hash, datastore_record->key, datastore_record->key_size);
         journalstore_record->timestamp = datastore_record->timestamp;
         // look up the corresponding journalstore record for possible updating

@@ -264,17 +268,23 @@ int repo_fsrepo_lmdb_put(struct DatastoreRecord* datastore_record, const struct
             // add it to the journalstore
             journalstore_record = lmdb_journal_record_new();
             journalstore_record->hash = (uint8_t*) malloc(datastore_record->key_size);
-            memcpy(journalstore_record->hash, datastore_record->key, datastore_record->key_size);
-            journalstore_record->hash_size = datastore_record->key_size;
-            journalstore_record->timestamp = datastore_record->timestamp;
-            journalstore_record->pending = 1; // TODO: Calculate this correctly
-            journalstore_record->pin = 1;
-            if (!lmdb_journalstore_journal_add(journalstore_cursor, journalstore_record)) {
-                libp2p_logger_error("lmdb_datastore", "Datastore record was added, but problem adding Journalstore record. Continuing.\n");
+            if (journalstore_record->hash == NULL) {
+                libp2p_logger_error("lmdb_datastore", "Unable to allocate memory to add record to journalstore.\n");
+                lmdb_journalstore_cursor_close(journalstore_cursor, 0);
+                lmdb_journal_record_free(journalstore_record);
+            } else {
+                memcpy(journalstore_record->hash, datastore_record->key, datastore_record->key_size);
+                journalstore_record->hash_size = datastore_record->key_size;
+                journalstore_record->timestamp = datastore_record->timestamp;
+                journalstore_record->pending = 1; // TODO: Calculate this correctly
+                journalstore_record->pin = 1;
+                if (!lmdb_journalstore_journal_add(journalstore_cursor, journalstore_record)) {
+                    libp2p_logger_error("lmdb_datastore", "Datastore record was added, but problem adding Journalstore record. Continuing.\n");
                 }
-            lmdb_journalstore_cursor_close(journalstore_cursor, 0);
-            lmdb_journal_record_free(journalstore_record);
-            retVal = 1;
+                lmdb_journalstore_cursor_close(journalstore_cursor, 0);
+                lmdb_journal_record_free(journalstore_record);
+                retVal = 1;
+            }
         }
     } else {
         // datastore record was unable to be added.

@@ -321,10 +331,25 @@ int repo_fsrepro_lmdb_open(int argc, char** argv, struct Datastore* datastore) {
     }

     struct lmdb_context *db_context = (struct lmdb_context *) malloc(sizeof(struct lmdb_context));
+    if (db_context == NULL) {
+        mdb_env_close(mdb_env);
+        return 0;
+    }
     datastore->datastore_context = (void*) db_context;
     db_context->db_environment = (void*)mdb_env;
     db_context->datastore_db = (MDB_dbi*) malloc(sizeof(MDB_dbi));
+    if (db_context->datastore_db == NULL) {
+        mdb_env_close(mdb_env);
+        free(db_context);
+        return 0;
+    }
     db_context->journal_db = (MDB_dbi*) malloc(sizeof(MDB_dbi));
+    if (db_context->journal_db == NULL) {
+        free(db_context->datastore_db);
+        free(db_context);
+        mdb_env_close(mdb_env);
+        return 0;
+    }

     // open the 2 databases
     if (mdb_txn_begin(mdb_env, NULL, 0, &db_context->current_transaction) != 0) {

@@ -101,8 +101,12 @@ int lmdb_journalstore_build_record(const struct MDB_val* db_key, const struct MD
     }
     rec->hash_size = db_value->mv_size - 2;
     rec->hash = malloc(rec->hash_size);
-    uint8_t *val = (uint8_t*)db_value->mv_data;
-    memcpy(rec->hash, &val[2], rec->hash_size);
+    if (rec->hash != NULL) {
+        uint8_t *val = (uint8_t*)db_value->mv_data;
+        memcpy(rec->hash, &val[2], rec->hash_size);
+    } else {
+        return 0;
+    }

     return 1;
 }

@@ -95,7 +95,8 @@ int make_ipfs_repository(const char* path, int swarm_port, struct Libp2pVector*
     printf("peer identity: %s\n", fs_repo->config->identity->peer->id);
     if (peer_id != NULL) {
         *peer_id = malloc(fs_repo->config->identity->peer->id_size + 1);
-        strcpy(*peer_id, fs_repo->config->identity->peer->id);
+        if (*peer_id != NULL)
+            strcpy(*peer_id, fs_repo->config->identity->peer->id);
     }

     // make sure the repository exists

@@ -50,6 +50,7 @@ int ipfs_routing_kademlia_get_value(struct IpfsRouting* routing, const unsigned
  * @returns true(1) on success, otherwise false(0)
  */
 int ipfs_routing_kademlia_find_providers(struct IpfsRouting* routing, const unsigned char* key, size_t key_size, struct Libp2pVector** results) {
+    int retVal = 1;
     *results = libp2p_utils_vector_new(1);
     struct Libp2pVector* vector = *results;
     // see if I can provide it

@@ -70,26 +71,30 @@ int ipfs_routing_kademlia_find_providers(struct IpfsRouting* routing, const unsi
     if (vector->total == 0) {
         // search requires null terminated key
         char* key_nt = malloc(key_size + 1);
-        strncpy(key_nt, (char*)key, key_size);
-        key_nt[key_size] = 0;
-        struct MultiAddress** list = search_kademlia(key_nt, 3);
-        free(key_nt);
-        if (list != NULL) {
-            int i = 0;
-            while (list[i] != NULL) {
-                struct MultiAddress* current = list[i];
-                libp2p_utils_vector_add(vector, current);
-                i++;
+        if (key_nt != NULL) {
+            strncpy(key_nt, (char*)key, key_size);
+            key_nt[key_size] = 0;
+            struct MultiAddress** list = search_kademlia(key_nt, 3);
+            free(key_nt);
+            if (list != NULL) {
+                int i = 0;
+                while (list[i] != NULL) {
+                    struct MultiAddress* current = list[i];
+                    libp2p_utils_vector_add(vector, current);
+                    i++;
+                }
             }
+        } else {
+            retVal = 0;
         }
     }
     if (vector->total == 0) {
         // we were unable to find it, even on the network
         libp2p_utils_vector_free(vector);
         vector = NULL;
-        return 0;
+        retVal = 0;
     }
-    return 1;
+    return retVal;
 }

 /**

@@ -97,6 +97,8 @@ int ipfs_routing_generic_get_value (ipfs_routing* routing, const unsigned char *
     // protobuf the node
     int protobuf_size = ipfs_hashtable_node_protobuf_encode_size(node);
     *val = malloc(protobuf_size);
+    if (*val == NULL)
+        goto exit;

     if (ipfs_hashtable_node_protobuf_encode(node, *val, protobuf_size, vlen) == 0) {
         goto exit;

@@ -53,12 +53,20 @@ int ipfs_routing_online_find_remote_providers(struct IpfsRouting* routing, const
     message->message_type = MESSAGE_TYPE_GET_PROVIDERS;
     message->key_size = key_size;
     message->key = malloc(message->key_size);
+    if (message->key == NULL) {
+        libp2p_message_free(message);
+        return 0;
+    }
     memcpy(message->key, key, message->key_size);
-    size_t b58size = 100;
-    uint8_t *b58key = (uint8_t *) malloc(b58size);
-    libp2p_crypto_encoding_base58_encode((unsigned char*)message->key, message->key_size, (unsigned char**) &b58key, &b58size);
-    libp2p_logger_debug("online", "find_remote_providers looking for key %s.\n", b58key);
-    free(b58key);
+    if (libp2p_logger_watching_class("online")) {
+        size_t b58size = 100;
+        uint8_t *b58key = (uint8_t *) malloc(b58size);
+        if (b58key != NULL) {
+            libp2p_crypto_encoding_base58_encode((unsigned char*)message->key, message->key_size, (unsigned char**) &b58key, &b58size);
+            libp2p_logger_debug("online", "find_remote_providers looking for key %s.\n", b58key);
+            free(b58key);
+        }
+    }
     // loop through the connected peers, asking for the hash
     struct Libp2pLinkedList* current_entry = routing->local_node->peerstore->head_entry;
     while (current_entry != NULL) {

@@ -246,6 +254,10 @@ int ipfs_routing_online_provide(struct IpfsRouting* routing, const unsigned char
     struct KademliaMessage* msg = libp2p_message_new();
     msg->key_size = key_size;
     msg->key = malloc(msg->key_size);
+    if (msg->key == NULL) {
+        libp2p_message_free(msg);
+        return 0;
+    }
     memcpy(msg->key, key, msg->key_size);
     msg->message_type = MESSAGE_TYPE_ADD_PROVIDER;
     msg->provider_peer_head = libp2p_utils_linked_list_new();

@@ -333,6 +345,10 @@ int ipfs_routing_online_get_peer_value(ipfs_routing* routing, const struct Libp2
     struct KademliaMessage* msg = libp2p_message_new();
     msg->key_size = key_size;
     msg->key = malloc(msg->key_size);
+    if (msg->key == NULL) {
+        libp2p_message_free(msg);
+        return 0;
+    }
     memcpy(msg->key, key, msg->key_size);
     msg->message_type = MESSAGE_TYPE_GET_VALUE;

@@ -139,12 +139,14 @@ int ipfs_unixfs_add_data(unsigned char* data, size_t data_length, struct UnixFS*
     }

     // debug: display hash
-    size_t b58size = 100;
-    uint8_t *b58key = (uint8_t *) malloc(b58size);
-    if (b58key != NULL) {
-        libp2p_crypto_encoding_base58_encode(unix_fs->hash, unix_fs->hash_length, &b58key, &b58size);
-        libp2p_logger_debug("unixfs", "Saving hash of %s to unixfs object.\n", b58key);
-        free(b58key);
+    if (libp2p_logger_watching_class("unixfs")) {
+        size_t b58size = 100;
+        uint8_t *b58key = (uint8_t *) malloc(b58size);
+        if (b58key != NULL) {
+            libp2p_crypto_encoding_base58_encode(unix_fs->hash, unix_fs->hash_length, &b58key, &b58size);
+            libp2p_logger_debug("unixfs", "Saving hash of %s to unixfs object.\n", b58key);
+            free(b58key);
+        }
     }

     return 1;

@@ -157,6 +159,9 @@ int ipfs_unixfs_add_blocksize(const struct UnixFSBlockSizeNode* blocksize, struc
     if (last == NULL) {
         // we're the first one
         unix_fs->block_size_head = (struct UnixFSBlockSizeNode*)malloc(sizeof(struct UnixFSBlockSizeNode));
+        if (unix_fs->block_size_head == NULL) {
+            return 0;
+        }
         unix_fs->block_size_head->block_size = blocksize->block_size;
         unix_fs->block_size_head->next = NULL;
     } else {

@@ -165,6 +170,8 @@ int ipfs_unixfs_add_blocksize(const struct UnixFSBlockSizeNode* blocksize, struc
         last = last->next;
     }
     last->next = (struct UnixFSBlockSizeNode*)malloc(sizeof(struct UnixFSBlockSizeNode));
+    if (last->next == NULL)
+        return 0;
     last->next->block_size = blocksize->block_size;
     last->next->next = NULL;
 }