Added routines to cleanly shut down the daemon

This still needs more work, but the mechanism now exists
This commit is contained in:
John Jones 2017-04-17 11:58:47 -05:00
parent 2b0a29a06b
commit 427b5c948f
18 changed files with 265 additions and 108 deletions

View file

@ -52,6 +52,9 @@ void ipfs_bootstrap_announce_files(struct IpfsNode* local_node) {
free(key);
}
// close cursor
db->datastore_cursor_close(db);
return;
}

View file

@ -13,21 +13,21 @@
#include "libp2p/utils/logger.h"
int ipfs_daemon_start(char* repo_path) {
int count_pths = 0;
int count_pths = 0, retVal = 0;
pthread_t work_pths[MAX];
struct IpfsNodeListenParams listen_param;
struct MultiAddress* ma = NULL;
libp2p_logger_info("daemon", "Initializing daemon...\n");
// read the configuration
struct FSRepo* fs_repo;
struct FSRepo* fs_repo = NULL;
if (!ipfs_repo_fsrepo_new(repo_path, NULL, &fs_repo))
return 0;
goto exit;
// open the repository and read the file
if (!ipfs_repo_fsrepo_open(fs_repo)) {
ipfs_repo_fsrepo_free(fs_repo);
return 0;
goto exit;
}
// create a new IpfsNode
@ -39,7 +39,7 @@ int ipfs_daemon_start(char* repo_path) {
local_node.identity = fs_repo->config->identity;
// Set null router param
struct MultiAddress *ma = multiaddress_new_from_string(fs_repo->config->addresses->swarm_head->item);
ma = multiaddress_new_from_string(fs_repo->config->addresses->swarm_head->item);
listen_param.port = multiaddress_get_ip_port(ma);
listen_param.ipv4 = 0; // ip 0.0.0.0, all interfaces
listen_param.local_node = &local_node;
@ -47,15 +47,10 @@ int ipfs_daemon_start(char* repo_path) {
// Create pthread for swarm listener.
if (pthread_create(&work_pths[count_pths++], NULL, ipfs_null_listen, &listen_param)) {
libp2p_logger_error("daemon", "Error creating thread for ipfs null listen\n");
return 1;
goto exit;
}
ipfs_bootstrap_routing(&local_node);
/*
if (pthread_create(&work_pths[count_pths++], NULL, ipfs_bootstrap_routing, &local_node)) {
fprintf(stderr, "Error creating thread for routing\n");
}
*/
libp2p_logger_info("daemon", "Daemon is ready\n");
@ -63,15 +58,33 @@ int ipfs_daemon_start(char* repo_path) {
while (count_pths) {
if (pthread_join(work_pths[--count_pths], NULL)) {
libp2p_logger_error("daemon", "Error joining thread\n");
return 2;
goto exit;
}
}
// All pthreads aborted?
return 0;
retVal = 1;
exit:
fprintf(stderr, "Cleaning up daemon processes\n");
// clean up
if (fs_repo != NULL)
ipfs_repo_fsrepo_free(fs_repo);
if (local_node.peerstore != NULL)
libp2p_peerstore_free(local_node.peerstore);
if (local_node.providerstore != NULL)
libp2p_providerstore_free(local_node.providerstore);
if (ma != NULL)
multiaddress_free(ma);
if (local_node.routing != NULL) {
ipfs_routing_online_free(local_node.routing);
}
return retVal;
}
/***
 * Request a clean shutdown of the running daemon.
 * Delegates to ipfs_null_shutdown, which raises the flag the
 * ipfs_null_listen loop polls between select() timeouts.
 * @returns the result of ipfs_null_shutdown (true(1))
 */
int ipfs_daemon_stop() {
	return ipfs_null_shutdown();
}
int ipfs_daemon (int argc, char **argv)
{
char* repo_path = NULL;

View file

@ -22,6 +22,8 @@
#define BUF_SIZE 4096
static int null_shutting_down = 0;
/***
* Compare incoming to see if they are requesting a protocol upgrade
* @param incoming the incoming string
@ -70,12 +72,15 @@ void *ipfs_null_connection (void *ptr)
if (!libp2p_secio_handshake(&session, &connection_param->local_node->identity->private_key, 1)) {
// rejecting connection
libp2p_logger_debug("null", "Secure IO connection failed\n");
free(results);
break;
}
} else if (protocol_compare(results, bytes_read, "/nodeio")) {
libp2p_logger_debug("null", "Attempting a nodeio connection.\n");
if (!libp2p_nodeio_handshake(&session))
if (!libp2p_nodeio_handshake(&session)) {
free(results);
break;
}
// loop through file requests
int _continue = 1;
while(_continue) {
@ -107,6 +112,7 @@ void *ipfs_null_connection (void *ptr)
libp2p_logger_log("null", LOGLEVEL_DEBUG, "Attempting kademlia connection...\n");
if (!libp2p_routing_dht_handshake(&session)) {
libp2p_logger_log("null", LOGLEVEL_DEBUG, "kademlia connection handshake failed\n");
free(results);
break;
}
// this handles 1 transaction
@ -118,6 +124,7 @@ void *ipfs_null_connection (void *ptr)
// oops there was a problem
//TODO: Handle this
}
free(results);
}
} else {
libp2p_logger_log("null", LOGLEVEL_DEBUG, "Multistream negotiation failed\n");
@ -126,6 +133,9 @@ void *ipfs_null_connection (void *ptr)
if (session.default_stream != NULL) {
session.default_stream->close(&session);
}
if (session.insecure_stream != NULL) {
libp2p_net_multistream_stream_free(session.insecure_stream);
}
(*(connection_param->count))--; // update counter.
free (connection_param);
return (void*) 1;
@ -148,6 +158,11 @@ void *ipfs_null_listen (void *ptr)
libp2p_logger_log("null", LOGLEVEL_ERROR, "Ipfs listening on %d\n", listen_param->port);
for (;;) {
int numDescriptors = socket_read_select4(socketfd, 5);
if (null_shutting_down) {
break;
}
if (numDescriptors > 0) {
s = socket_accept4(socketfd, &(listen_param->ipv4), &(listen_param->port));
if (count >= CONNECTIONS) { // limit reached.
close (s);
@ -176,6 +191,12 @@ void *ipfs_null_listen (void *ptr)
}
}
}
}
return (void*) 2;
}
/***
 * Signal the listener loop (ipfs_null_listen) to exit at its next
 * pass through the select() timeout check.
 * NOTE(review): null_shutting_down is a plain static int written here
 * and read from the listener thread with no synchronization — consider
 * making it an atomic_bool (C11 <stdatomic.h>) to avoid a data race.
 * @returns true(1)
 */
int ipfs_null_shutdown() {
	null_shutting_down = 1;
	return 1;
}

View file

@ -28,8 +28,10 @@ struct IpfsNodeListenParams {
void *ipfs_null_connection (void *ptr);
void *ipfs_null_listen (void *ptr);
int ipfs_null_shutdown();
int ipfs_daemon (int argc, char **argv);
int ipfs_daemon_start(char* repo_path);
int ipfs_daemon_stop();
int ipfs_ping (int argc, char **argv);
#endif // DAEMON_H

View file

@ -16,11 +16,9 @@ struct Addresses {
/**
* initialize the Addresses struct with data. Must add the SwarmAddresses later
* @param addresses the struct
* @param api the API address (like "/ip4/127.0.0.1/tcp/5001")
* @param gateway the gateway address (like "ip4/127.0.0.1/tcp/8080")
* @returns true(1) on success, otherwise false(0)
*/
int repo_config_addresses_new(struct Addresses** addresses, char* api, char* gateway);
int repo_config_addresses_new(struct Addresses** addresses);
/**
* clear any memory allocated by a address_new call

View file

@ -80,6 +80,7 @@ typedef struct IpfsRouting ipfs_routing;
ipfs_routing* ipfs_routing_new_offline (struct IpfsNode* local_node, struct RsaPrivateKey *private_key);
// online using secio, should probably be deprecated
ipfs_routing* ipfs_routing_new_online (struct IpfsNode* local_node, struct RsaPrivateKey* private_key, struct Stream* stream);
int ipfs_routing_online_free(ipfs_routing*);
// online using DHT/kademlia, the recommended router
ipfs_routing* ipfs_routing_new_kademlia(struct IpfsNode* local_node, struct RsaPrivateKey* private_key, struct Stream* stream);
// generic routines

View file

@ -12,22 +12,16 @@ char* alloc_and_copy(char* source) {
return result;
}
int repo_config_addresses_new(struct Addresses** addresses, char* api, char* gateway) {
int repo_config_addresses_new(struct Addresses** addresses) {
*addresses = (struct Addresses*)malloc(sizeof(struct Addresses));
if (*addresses == NULL)
return 0;
struct Addresses* addr = *addresses;
// allocate memory to store api and gateway
addr->api = alloc_and_copy(api);
addr->gateway = alloc_and_copy(gateway);
if ( addr->api == NULL || addr->gateway == NULL)
return 0;
// allocate memory for swarm_addresses
//if (repo_config_swarm_address_new(&((*addresses)->swarm)) == 0)
// return 0;
// this is now allocated when it is filled
// allocate memory to store api and gateway
addr->api = NULL;
addr->gateway = NULL;
addr->swarm_head = NULL;
return 1;

View file

@ -6,6 +6,7 @@
int repo_config_bootstrap_peers_retrieve(struct Libp2pVector** list) {
/*
char* default_bootstrap_addresses[] = {
"/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", // mars.i.ipfs.io
"/ip4/104.236.176.52/tcp/4001/ipfs/QmSoLnSGccFuZQJzRadHn95W2CrSFmZuTdDWP8HXaHca9z", // neptune.i.ipfs.io
@ -17,16 +18,25 @@ int repo_config_bootstrap_peers_retrieve(struct Libp2pVector** list) {
"/ip4/178.62.61.185/tcp/4001/ipfs/QmSoLMeWqB7YGVLJN3pNLQpmmEk35v6wYtsMGLzSr5QBU3", // mercury.i.ipfs.io
"/ip4/104.236.151.122/tcp/4001/ipfs/QmSoLju6m7xTh3DuokvT3886QRYqxAzb1kShaanJgW36yx", // jupiter.i.ipfs.io
};
*list = libp2p_utils_vector_new(9);
*/
*list = libp2p_utils_vector_new(1);
/*
for(int i = 0; i < 9; i++) {
struct MultiAddress* currAddr = multiaddress_new_from_string(default_bootstrap_addresses[i]);
libp2p_utils_vector_add(*list, currAddr);
}
*/
return 1;
}
/***
 * Release a vector of bootstrap peers.
 * Frees every MultiAddress held in the vector, then the vector itself.
 * @param list the vector to free (may be NULL)
 * @returns true(1)
 */
int repo_config_bootstrap_peers_free(struct Libp2pVector* list) {
	if (list == NULL)
		return 1;
	for (int idx = 0; idx < list->total; idx++) {
		struct MultiAddress* addr = (struct MultiAddress*)libp2p_utils_vector_get(list, idx);
		multiaddress_free(addr);
	}
	libp2p_utils_vector_free(list);
	return 1;
}

View file

@ -74,6 +74,12 @@ int repo_config_get_file_name(char* path, char** result) {
return os_utils_filepath_join(path, "config", *result, max_len);
}
/***
 * Sanity-check a generated identity: a valid peer id must start with "Qm".
 * (Index 1 is only read after index 0 matched 'Q', so a one-character
 * string cannot be read out of bounds thanks to short-circuit evaluation.)
 * @param identity the identity to check
 * @returns true(1) if the peer id looks valid, otherwise false(0)
 */
int ipfs_repo_config_is_valid_identity(struct Identity* identity) {
	const char* id = identity->peer_id;
	if (id != NULL && id[0] == 'Q' && id[1] == 'm')
		return 1;
	return 0;
}
/***
* create a configuration based on the passed in parameters
* @param config the configuration struct to be filled in
@ -86,8 +92,9 @@ int repo_config_get_file_name(char* path, char** result) {
int ipfs_repo_config_init(struct RepoConfig* config, unsigned int num_bits_for_keypair, const char* repo_path, int swarm_port, struct Libp2pVector *bootstrap_peers) {
// identity
int retVal = repo_config_identity_init(config->identity, num_bits_for_keypair);
if (retVal == 0)
if (retVal == 0 || !ipfs_repo_config_is_valid_identity(config->identity)) {
return 0;
}
// bootstrap peers
if (bootstrap_peers != NULL) {
@ -165,7 +172,7 @@ int ipfs_repo_config_new(struct RepoConfig** config) {
if (retVal == 0)
return 0;
retVal = repo_config_addresses_new(&((*config)->addresses), "/ip4/127.0.0.1/tcp/5001", "/ip4/127.0.0.1/tcp/8080");
retVal = repo_config_addresses_new(&((*config)->addresses));
if (retVal == 0)
return 0;

View file

@ -46,7 +46,7 @@ int repo_config_identity_init(struct Identity* identity, unsigned long num_bits_
if (!libp2p_crypto_rsa_generate_keypair( &(identity->private_key), num_bits_for_keypair))
return 0;
if (repo_config_identity_build_peer_id(identity) == 0)
if (!repo_config_identity_build_peer_id(identity))
return 0;
return 1;

View file

@ -393,16 +393,21 @@ int fs_repo_open_config(struct FSRepo* repo) {
return 0;
}
// the next should be the array, then string "PeerID"
_get_json_string_value(data, tokens, num_tokens, curr_pos, "PeerID", &repo->config->identity->peer_id);
//NOTE: the code below compares the peer id of the file with the peer id generated
// by the key. If they don't match, we fail.
char* peer_id = NULL;
_get_json_string_value(data, tokens, num_tokens, curr_pos, "PeerID", &peer_id);
char* priv_key_base64;
// then PrivKey
_get_json_string_value(data, tokens, num_tokens, curr_pos, "PrivKey", &priv_key_base64);
retVal = repo_config_identity_build_private_key(repo->config->identity, priv_key_base64);
if (retVal == 0) {
if (retVal == 0 || strcmp(peer_id, repo->config->identity->peer_id) != 0) {
free(data);
free(priv_key_base64);
free(peer_id);
return 0;
}
free(peer_id);
// now the datastore
//int datastore_position = _find_token(data, tokens, num_tokens, 0, "Datastore");
_get_json_string_value(data, tokens, num_tokens, curr_pos, "Type", &repo->config->datastore->type);

View file

@ -71,8 +71,13 @@ int make_ipfs_repository(const char* path, int swarm_port, struct Libp2pVector*
if (retVal == 0)
return 0;
printf("generating 2048-bit RSA keypair...");
retVal = ipfs_repo_config_init(repo_config, 2048, path, swarm_port, bootstrap_peers);
if (retVal == 0)
while (!ipfs_repo_config_init(repo_config, 2048, path, swarm_port, bootstrap_peers)) {
// we got a bad identity... try again
ipfs_repo_config_free(repo_config);
if (!ipfs_repo_config_new(&repo_config))
break;
}
if (repo_config == NULL)
return 0;
printf("done\n");
// now the fs_repo

View file

@ -44,6 +44,12 @@ struct Libp2pMessage* ipfs_routing_online_send_receive_message(struct Stream* st
goto exit;
exit:
if (protobuf != NULL)
free(protobuf);
if (results != NULL)
free(results);
return return_message;
}
@ -210,7 +216,9 @@ int ipfs_routing_online_provide(struct IpfsRouting* routing, char* key, size_t k
struct Libp2pPeer* current_peer = current_peer_entry->peer;
if (current_peer->connection_type == CONNECTION_TYPE_CONNECTED) {
// ignoring results is okay this time
ipfs_routing_online_send_receive_message(current_peer->connection, msg);
struct Libp2pMessage* rslt = ipfs_routing_online_send_receive_message(current_peer->connection, msg);
if (rslt != NULL)
libp2p_message_free(rslt);
}
current = current->next;
}
@ -260,33 +268,43 @@ int ipfs_routing_online_ping(struct IpfsRouting* routing, struct Libp2pPeer* pee
* @returns 0 on success, otherwise error code
*/
int ipfs_routing_online_bootstrap(struct IpfsRouting* routing) {
char* peer_id = NULL;
int peer_id_size = 0;
struct MultiAddress* address = NULL;
struct Libp2pPeer *peer = NULL;
// for each address in our bootstrap list, add info into the peerstore
struct Libp2pVector* bootstrap_peers = routing->local_node->repo->config->bootstrap_peers;
for(int i = 0; i < bootstrap_peers->total; i++) {
struct MultiAddress* address = (struct MultiAddress*)libp2p_utils_vector_get(bootstrap_peers, i);
address = (struct MultiAddress*)libp2p_utils_vector_get(bootstrap_peers, i);
// attempt to get the peer ID
const char* peer_id = multiaddress_get_peer_id(address);
peer_id = multiaddress_get_peer_id(address);
if (peer_id != NULL) {
struct Libp2pPeer* peer = libp2p_peer_new();
peer->id_size = strlen(peer_id);
peer_id_size = strlen(peer_id);
peer = libp2p_peer_new();
peer->id_size = peer_id_size;
peer->id = malloc(peer->id_size);
if (peer->id == NULL) { // out of memory?
libp2p_peer_free(peer);
free(peer_id);
return -1;
}
memcpy(peer->id, peer_id, peer->id_size);
peer->addr_head = libp2p_utils_linked_list_new();
if (peer->addr_head == NULL) { // out of memory?
libp2p_peer_free(peer);
free(peer_id);
return -1;
}
peer->addr_head->item = address;
peer->addr_head->item = multiaddress_copy(address);
libp2p_peerstore_add_peer(routing->local_node->peerstore, peer);
libp2p_peer_free(peer);
// now find it and attempt to connect
peer = libp2p_peerstore_get_peer(routing->local_node->peerstore, (const unsigned char*)peer_id, strlen(peer_id));
if (peer == NULL)
peer = libp2p_peerstore_get_peer(routing->local_node->peerstore, (const unsigned char*)peer_id, peer_id_size);
free(peer_id);
if (peer == NULL) {
return -1; // this should never happen
}
if (peer->connection == NULL) { // should always be true unless we added it twice (TODO: we should prevent that earlier)
libp2p_peer_connect(peer);
}
@ -322,3 +340,8 @@ ipfs_routing* ipfs_routing_new_online (struct IpfsNode* local_node, struct RsaPr
return onlineRouting;
}
/***
 * Free an online routing struct.
 * NOTE(review): this is a shallow free — only the ipfs_routing struct
 * itself is released; the local_node/stream it references appear to be
 * owned by the caller (see ipfs_daemon_start cleanup) — confirm.
 * @param incoming the struct to free (free(NULL) is a no-op)
 * @returns true(1)
 */
int ipfs_routing_online_free(ipfs_routing* incoming) {
	free(incoming);
	return 1;
}

32
test/core/test_daemon.h Normal file
View file

@ -0,0 +1,32 @@
#pragma once
#include <pthread.h>
#include "ipfs/core/daemon.h"
#include "../test_helper.h"
#include "libp2p/utils/logger.h"
/***
 * pthread entry point that runs the daemon.
 * @param arg the repository path (char*)
 * @returns NULL
 */
void* test_daemon_start(void* arg) {
	char* repo_path = (char*)arg;
	ipfs_daemon_start(repo_path);
	return NULL;
}
/***
 * Start the daemon in a background thread, then shut it down again.
 * Fix: the original ignored the return values of
 * drop_and_build_repository and pthread_create; a failed
 * pthread_create would make pthread_join operate on an
 * uninitialized thread handle (undefined behavior).
 * @returns true(1) on success, otherwise false(0)
 */
int test_daemon_startup_shutdown() {
	pthread_t daemon_thread;
	char* ipfs_path = "/tmp/ipfs";
	char* peer_id = NULL;
	// build a fresh repository for the daemon to run against
	if (!drop_and_build_repository(ipfs_path, 4001, NULL, &peer_id)) {
		libp2p_logger_error("test_daemon", "Unable to build repository\n");
		return 0;
	}
	free(peer_id);
	// launch the daemon, then ask it to stop
	if (pthread_create(&daemon_thread, NULL, test_daemon_start, (void*)ipfs_path) != 0) {
		libp2p_logger_error("test_daemon", "Unable to create daemon thread\n");
		return 0;
	}
	ipfs_daemon_stop();
	pthread_join(daemon_thread, NULL);
	libp2p_logger_free();
	return 1;
}

View file

@ -3,29 +3,28 @@
int test_repo_fsrepo_open_config() {
struct FSRepo* fs_repo = NULL;
struct RepoConfig* repo_config = NULL;
const char* path = "/tmp/.ipfs";
// create the struct
int retVal = ipfs_repo_fsrepo_new((char*)path, repo_config, &fs_repo);
if (retVal == 0)
if (!drop_build_and_open_repo(path, &fs_repo))
return 0;
// open the repository and read the file
retVal = ipfs_repo_fsrepo_open(fs_repo);
if (retVal == 0) {
ipfs_repo_fsrepo_free(fs_repo);
return 0;
}
retVal = ipfs_repo_fsrepo_free(fs_repo);
if (retVal == 0)
if (!ipfs_repo_fsrepo_free(fs_repo))
return 0;
return 1;
}
/***
 * Build (after dropping any previous copy) a test repository at /tmp/.ipfs.
 * Fix: removed the redundant NULL guard before free — free(NULL) is
 * defined as a no-op by the C standard.
 * @returns the result of drop_and_build_repository: true(1) on success
 */
int test_repo_fsrepo_build() {
	const char* path = "/tmp/.ipfs";
	char* peer_id = NULL;
	int retVal = drop_and_build_repository(path, 4001, NULL, &peer_id);
	free(peer_id);
	return retVal;
}
int test_repo_fsrepo_write_read_block() {
struct Block* block = NULL;
struct FSRepo* fs_repo = NULL;

View file

@ -1,11 +1,15 @@
#include <pthread.h>
#include "libp2p/os/utils.h"
#include "libp2p/utils/logger.h"
#include "multiaddr/multiaddr.h"
#include "ipfs/core/daemon.h"
#include "../test_helper.h"
#include "ipfs/routing/routing.h"
#include "../test_helper.h"
void* test_routing_daemon_start(void* arg) {
ipfs_daemon_start((char*)arg);
return NULL;
@ -95,13 +99,17 @@ int test_routing_find_providers() {
// clean out repository
char* ipfs_path = "/tmp/test1";
os_utils_setenv("IPFS_PATH", ipfs_path, 1);
char* peer_id_1;
char* peer_id_2;
struct FSRepo* fs_repo_2;
char* peer_id_3;
char* peer_id_1 = NULL;
char* peer_id_2 = NULL;
struct FSRepo* fs_repo_2 = NULL;;
char* peer_id_3 = NULL;
char* remote_peer_id = NULL;
pthread_t thread1, thread2;
int thread1_started = 0, thread2_started = 0;
struct MultiAddress* ma_peer1;
struct MultiAddress* ma_peer1 = NULL;
struct Libp2pVector* ma_vector2 = NULL, *ma_vector3 = NULL;
struct IpfsNode local_node;
struct FSRepo* fs_repo = NULL;
// create peer 1
drop_and_build_repository(ipfs_path, 4001, NULL, &peer_id_1);
@ -118,9 +126,11 @@ int test_routing_find_providers() {
// create peer 2
ipfs_path = "/tmp/test2";
os_utils_setenv("IPFS_PATH", ipfs_path, 1);
struct Libp2pVector* ma_vector = libp2p_utils_vector_new(1);
libp2p_utils_vector_add(ma_vector, ma_peer1);
drop_and_build_repository(ipfs_path, 4002, ma_vector, &peer_id_2);
// create a vector to hold peer1's multiaddress so we can connect as a peer
ma_vector2 = libp2p_utils_vector_new(1);
libp2p_utils_vector_add(ma_vector2, ma_peer1);
// note: this distroys some things, as it frees the fs_repo:
drop_and_build_repository(ipfs_path, 4002, ma_vector2, &peer_id_2);
// add a file, to prime the connection to peer 1
//TODO: Find a better way to do this...
size_t bytes_written = 0;
@ -143,14 +153,15 @@ int test_routing_find_providers() {
// create my peer, peer 3
ipfs_path = "/tmp/test3";
os_utils_setenv("IPFS_PATH", ipfs_path, 1);
drop_and_build_repository(ipfs_path, 4003, ma_vector, &peer_id_3);
ma_peer1 = multiaddress_new_from_string(multiaddress_string);
ma_vector3 = libp2p_utils_vector_new(1);
libp2p_utils_vector_add(ma_vector3, ma_peer1);
drop_and_build_repository(ipfs_path, 4003, ma_vector3, &peer_id_3);
struct FSRepo* fs_repo;
ipfs_repo_fsrepo_new(ipfs_path, NULL, &fs_repo);
ipfs_repo_fsrepo_open(fs_repo);
// We know peer 1, try to find peer 2
struct IpfsNode local_node;
local_node.mode = MODE_ONLINE;
local_node.peerstore = libp2p_peerstore_new();
local_node.providerstore = libp2p_providerstore_new();
@ -186,16 +197,45 @@ int test_routing_find_providers() {
goto exit;
}
fprintf(stderr, "Remote address is: %s\n", remote_peer->id);
remote_peer_id = malloc(remote_peer->id_size + 1);
memcpy(remote_peer_id, remote_peer->id, remote_peer->id_size);
remote_peer_id[remote_peer->id_size] = 0;
fprintf(stderr, "Remote address is: %s\n", remote_peer_id);
free(remote_peer_id);
remote_peer_id = NULL;
retVal = 1;
exit:
ipfs_daemon_stop();
if (fs_repo != NULL)
ipfs_repo_fsrepo_free(fs_repo);
if (peer_id_1 != NULL)
free(peer_id_1);
if (peer_id_2 != NULL)
free(peer_id_2);
if (peer_id_3 != NULL)
free(peer_id_3);
if (thread1_started)
pthread_cancel(thread1);
pthread_join(thread1, NULL);
if (thread2_started)
pthread_cancel(thread2);
pthread_join(thread2, NULL);
if (ma_vector2 != NULL) {
libp2p_utils_vector_free(ma_vector2);
}
if (ma_vector3 != NULL) {
libp2p_utils_vector_free(ma_vector3);
}
if (local_node.providerstore != NULL)
libp2p_providerstore_free(local_node.providerstore);
if (local_node.peerstore != NULL) {
libp2p_peerstore_free(local_node.peerstore);
}
if (local_node.routing != NULL) {
ipfs_routing_online_free(local_node.routing);
}
if (node != NULL)
ipfs_node_free(node);
libp2p_logger_free();
return retVal;
}

View file

@ -127,17 +127,16 @@ int drop_and_build_repository(const char* path, int swarm_port, struct Libp2pVec
int drop_build_and_open_repo(const char* path, struct FSRepo** fs_repo) {
int retVal = 0;
retVal = drop_and_build_repository("/tmp/.ipfs", 4001, NULL, NULL);
if (retVal == 0)
if (!drop_and_build_repository(path, 4001, NULL, NULL))
return 0;
retVal = ipfs_repo_fsrepo_new("/tmp/.ipfs", NULL, fs_repo);
if (retVal == 0)
if (!ipfs_repo_fsrepo_new(path, NULL, fs_repo))
return 0;
retVal = ipfs_repo_fsrepo_open(*fs_repo);
if (retVal == 0) {
if (!ipfs_repo_fsrepo_open(*fs_repo)) {
free(*fs_repo);
*fs_repo = NULL;
return 0;
}
return 1;

View file

@ -17,6 +17,7 @@
#include "storage/test_unixfs.h"
#include "core/test_ping.h"
#include "core/test_null.h"
#include "core/test_daemon.h"
int testit(const char* name, int (*func)(void)) {
printf("Testing %s...\n", name);
@ -33,12 +34,14 @@ const char* names[] = {
"test_cid_cast_multihash",
"test_cid_cast_non_multihash",
"test_cid_protobuf_encode_decode",
"test_daemon_startup_shutdown",
"test_repo_config_new",
"test_repo_config_init",
"test_repo_config_write",
"test_repo_config_identity_new",
"test_repo_config_identity_private_key",
"test_repo_fsrepo_write_read_block",
"test_repo_fsrepo_build",
"test_routing_supernode_start",
"test_get_init_command",
"test_import_small_file",
@ -76,12 +79,14 @@ int (*funcs[])(void) = {
test_cid_cast_multihash,
test_cid_cast_non_multihash,
test_cid_protobuf_encode_decode,
test_daemon_startup_shutdown,
test_repo_config_new,
test_repo_config_init,
test_repo_config_write,
test_repo_config_identity_new,
test_repo_config_identity_private_key,
test_repo_fsrepo_write_read_block,
test_repo_fsrepo_build,
test_routing_supernode_start,
test_get_init_command,
test_import_small_file,