#pragma once

#include <pthread.h>
#include <fcntl.h>
#include <sys/stat.h>

#include "libp2p/os/utils.h"
#include "libp2p/utils/logger.h"

#include "multiaddr/multiaddr.h"

#include "ipfs/core/daemon.h"
#include "ipfs/core/ipfs_node.h"
#include "ipfs/routing/routing.h"
#include "ipfs/importer/importer.h"
#include "ipfs/importer/exporter.h"

#include "../test_helper.h"

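/***
 * pthread entry point that starts a daemon for the repository path passed in arg
 */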
void* test_routing_daemon_start(void* arg) {
    ipfs_daemon_start((char*)arg);
    return NULL;
}

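/***
 * Start two daemons, then use a third node to ask daemon 1 for the location of peer 2
 */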
int test_routing_find_peer() {
    int retVal = 0;
    char* ipfs_path = "/tmp/test1";
    pthread_t thread1;
    pthread_t thread2;
    int thread1_started = 0, thread2_started = 0;
    char* peer_id_1 = NULL;
    char* peer_id_2 = NULL;
    char* peer_id_3 = NULL;
    struct IpfsNode local_node;
    struct IpfsNode *local_node2;
    struct FSRepo* fs_repo = NULL;
    struct MultiAddress* ma_peer1;
    struct Libp2pVector *ma_vector = NULL;
    struct Libp2pPeer* result = NULL;
    struct HashtableNode *node = NULL;

    // make the cleanup at exit: safe even if we bail out early
    local_node.peerstore = NULL;
    local_node.routing = NULL;

    //libp2p_logger_add_class("online");
    //libp2p_logger_add_class("null");
    //libp2p_logger_add_class("daemon");
    //libp2p_logger_add_class("dht_protocol");
    //libp2p_logger_add_class("peerstore");

    // create peer 1
    os_utils_setenv("IPFS_PATH", ipfs_path, 1);
    drop_and_build_repository(ipfs_path, 4001, NULL, &peer_id_1);
    char multiaddress_string[255];
    sprintf(multiaddress_string, "/ip4/127.0.0.1/tcp/4001/ipfs/%s", peer_id_1);
    ma_peer1 = multiaddress_new_from_string(multiaddress_string);
    // start the daemon in a separate thread
    if (pthread_create(&thread1, NULL, test_routing_daemon_start, (void*)ipfs_path) < 0)
        goto exit;
    thread1_started = 1;

    // wait for everything to start up
    sleep(3);

    // create peer 2
    ipfs_path = "/tmp/test2";
    os_utils_setenv("IPFS_PATH", ipfs_path, 1);
    ma_vector = libp2p_utils_vector_new(1);
    libp2p_utils_vector_add(ma_vector, ma_peer1);
    drop_and_build_repository(ipfs_path, 4002, ma_vector, &peer_id_2);
    // add a file, to prime the connection to peer 1
    //TODO: Find a better way to do this...
    size_t bytes_written = 0;
    ipfs_node_online_new(ipfs_path, &local_node2);
    local_node2->routing->Bootstrap(local_node2->routing);
    ipfs_import_file(NULL, "/home/parallels/ipfstest/hello_world.txt", &node, local_node2, &bytes_written, 0);
    ipfs_node_free(local_node2);
    // start the daemon in a separate thread
    if (pthread_create(&thread2, NULL, test_routing_daemon_start, (void*)ipfs_path) < 0)
        goto exit;
    thread2_started = 1;

    // wait for everything to start up
    sleep(3);

    // create my peer, peer 3
    ipfs_path = "/tmp/test3";
    os_utils_setenv("IPFS_PATH", ipfs_path, 1);
    libp2p_utils_vector_add(ma_vector, ma_peer1);
    drop_and_build_repository(ipfs_path, 4003, ma_vector, &peer_id_3);

    ipfs_repo_fsrepo_new(ipfs_path, NULL, &fs_repo);
    ipfs_repo_fsrepo_open(fs_repo);

    // We know peer 1, try to find peer 2
    local_node.mode = MODE_ONLINE;
    local_node.peerstore = libp2p_peerstore_new(fs_repo->config->identity->peer);
    local_node.providerstore = NULL;
    local_node.repo = fs_repo;
    local_node.identity = fs_repo->config->identity;
    local_node.routing = ipfs_routing_new_online(&local_node, &fs_repo->config->identity->private_key);

    local_node.routing->Bootstrap(local_node.routing);

    if (!local_node.routing->FindPeer(local_node.routing, (unsigned char*)peer_id_2, strlen(peer_id_2), &result)) {
        fprintf(stderr, "Unable to find peer %s by asking %s\n", peer_id_2, peer_id_1);
        goto exit;
    }

    if (result == NULL) {
        fprintf(stderr, "Result was NULL\n");
        goto exit;
    }

    struct MultiAddress *remote_address = result->addr_head->item;
    fprintf(stderr, "Remote address is: %s\n", remote_address->string);

    retVal = 1;
    exit:
    ipfs_daemon_stop();
    if (thread1_started)
        pthread_join(thread1, NULL);
    if (thread2_started)
        pthread_join(thread2, NULL);
    if (peer_id_1 != NULL)
        free(peer_id_1);
    if (peer_id_2 != NULL)
        free(peer_id_2);
    if (peer_id_3 != NULL)
        free(peer_id_3);
    if (fs_repo != NULL)
        ipfs_repo_fsrepo_free(fs_repo);
    if (ma_peer1 != NULL)
        multiaddress_free(ma_peer1);
    if (ma_vector != NULL)
        libp2p_utils_vector_free(ma_vector);
    if (node != NULL)
        ipfs_hashtable_node_free(node);
    if (local_node.peerstore != NULL)
        libp2p_peerstore_free(local_node.peerstore);
    if (local_node.routing != NULL)
        ipfs_routing_online_free(local_node.routing);
    if (result != NULL)
        libp2p_peer_free(result);

    return retVal;
}

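/***
 * Start two daemons, add a file to peer 2, then use a third node to find out who provides that file
 */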
int test_routing_find_providers() {
    int retVal = 0;
    // clean out repository
    char* ipfs_path = "/tmp/test1";
    os_utils_setenv("IPFS_PATH", ipfs_path, 1);
    char* peer_id_1 = NULL;
    char* peer_id_2 = NULL;
    struct IpfsNode *local_node2 = NULL;
    char* peer_id_3 = NULL;
    char* remote_peer_id = NULL;
    pthread_t thread1, thread2;
    int thread1_started = 0, thread2_started = 0;
    struct MultiAddress* ma_peer1 = NULL;
    struct Libp2pVector* ma_vector2 = NULL, *ma_vector3 = NULL;
    struct IpfsNode local_node;
    struct FSRepo* fs_repo = NULL;
    struct HashtableNode* node = NULL;
    struct Libp2pVector* result = NULL;

    // make the cleanup at exit: safe even if we bail out early
    local_node.peerstore = NULL;
    local_node.providerstore = NULL;
    local_node.routing = NULL;

    // create peer 1
    drop_and_build_repository(ipfs_path, 4001, NULL, &peer_id_1);
    char multiaddress_string[255];
    sprintf(multiaddress_string, "/ip4/127.0.0.1/tcp/4001/ipfs/%s", peer_id_1);
    ma_peer1 = multiaddress_new_from_string(multiaddress_string);
    // start the daemon in a separate thread
    if (pthread_create(&thread1, NULL, test_routing_daemon_start, (void*)ipfs_path) < 0) {
        fprintf(stderr, "Unable to start thread 1\n");
        goto exit;
    }
    thread1_started = 1;

    // create peer 2
    ipfs_path = "/tmp/test2";
    os_utils_setenv("IPFS_PATH", ipfs_path, 1);
    // create a vector to hold peer1's multiaddress so we can connect as a peer
    ma_vector2 = libp2p_utils_vector_new(1);
    libp2p_utils_vector_add(ma_vector2, ma_peer1);
    // note: this destroys some things, as it frees the fs_repo:
    drop_and_build_repository(ipfs_path, 4002, ma_vector2, &peer_id_2);
    // add a file, to prime the connection to peer 1
    //TODO: Find a better way to do this...
    size_t bytes_written = 0;
    ipfs_node_online_new(ipfs_path, &local_node2);
    ipfs_import_file(NULL, "/home/parallels/ipfstest/hello_world.txt", &node, local_node2, &bytes_written, 0);
    ipfs_node_free(local_node2);
    // start the daemon in a separate thread
    if (pthread_create(&thread2, NULL, test_routing_daemon_start, (void*)ipfs_path) < 0) {
        fprintf(stderr, "Unable to start thread 2\n");
        goto exit;
    }
    thread2_started = 1;

    // wait for everything to start up
    sleep(3);

    // create my peer, peer 3
    ipfs_path = "/tmp/test3";
    os_utils_setenv("IPFS_PATH", ipfs_path, 1);
    ma_peer1 = multiaddress_new_from_string(multiaddress_string);
    ma_vector3 = libp2p_utils_vector_new(1);
    libp2p_utils_vector_add(ma_vector3, ma_peer1);
    drop_and_build_repository(ipfs_path, 4003, ma_vector3, &peer_id_3);

    ipfs_repo_fsrepo_new(ipfs_path, NULL, &fs_repo);
    ipfs_repo_fsrepo_open(fs_repo);

    // We know peer 1, try to find a provider of the file that peer 2 added
    local_node.mode = MODE_ONLINE;
    local_node.peerstore = libp2p_peerstore_new(fs_repo->config->identity->peer);
    local_node.providerstore = libp2p_providerstore_new(fs_repo->config->datastore, fs_repo->config->identity->peer);
    local_node.repo = fs_repo;
    local_node.identity = fs_repo->config->identity;
    local_node.routing = ipfs_routing_new_online(&local_node, &fs_repo->config->identity->private_key);

    local_node.routing->Bootstrap(local_node.routing);

    if (!local_node.routing->FindProviders(local_node.routing, node->hash, node->hash_size, &result)) {
        fprintf(stderr, "Unable to find a provider\n");
        goto exit;
    }

    if (result == NULL) {
        fprintf(stderr, "Provider array is NULL\n");
        goto exit;
    }

    // connect to peer 2
    struct Libp2pPeer *remote_peer = NULL;
    for(int i = 0; i < result->total; i++) {
        remote_peer = (struct Libp2pPeer*)libp2p_utils_vector_get(result, i);
        if (remote_peer->connection_type == CONNECTION_TYPE_CONNECTED || libp2p_peer_connect(&local_node.identity->private_key, remote_peer, local_node.peerstore, 5)) {
            break;
        }
        remote_peer = NULL;
    }

    if (remote_peer == NULL) {
        fprintf(stderr, "Remote Peer is NULL\n");
        goto exit;
    }

    remote_peer_id = malloc(remote_peer->id_size + 1);
    memcpy(remote_peer_id, remote_peer->id, remote_peer->id_size);
    remote_peer_id[remote_peer->id_size] = 0;
    fprintf(stderr, "Remote address is: %s\n", remote_peer_id);
    free(remote_peer_id);
    remote_peer_id = NULL;

    retVal = 1;
    exit:
    ipfs_daemon_stop();
    if (fs_repo != NULL)
        ipfs_repo_fsrepo_free(fs_repo);
    if (peer_id_1 != NULL)
        free(peer_id_1);
    if (peer_id_2 != NULL)
        free(peer_id_2);
    if (peer_id_3 != NULL)
        free(peer_id_3);
    if (thread1_started)
        pthread_join(thread1, NULL);
    if (thread2_started)
        pthread_join(thread2, NULL);
    if (ma_vector2 != NULL) {
        libp2p_utils_vector_free(ma_vector2);
    }
    if (ma_vector3 != NULL) {
        libp2p_utils_vector_free(ma_vector3);
    }
    if (local_node.providerstore != NULL)
        libp2p_providerstore_free(local_node.providerstore);
    if (local_node.peerstore != NULL) {
        libp2p_peerstore_free(local_node.peerstore);
    }
    if (local_node.routing != NULL) {
        ipfs_routing_online_free(local_node.routing);
    }
    if (node != NULL)
        ipfs_hashtable_node_free(node);
    if (result != NULL) {
        // we have a vector of peers. Clean 'em up:
        /* free the vector, not the peers.
        for(int i = 0; i < result->total; i++) {
            struct Libp2pPeer* p = (struct Libp2pPeer*)libp2p_utils_vector_get(result, i);
            libp2p_peer_free(p);
        }
        */
        libp2p_utils_vector_free(result);
    }
    return retVal;
}

/***
 * Fire up a "client" and "server" and let the client tell the server he's providing a file
 */
int test_routing_provide() {
    int retVal = 0;
    // clean out repository
    char* ipfs_path = "/tmp/test1";
    os_utils_setenv("IPFS_PATH", ipfs_path, 1);
    char* peer_id_1 = NULL;
    char* peer_id_2 = NULL;
    struct IpfsNode *local_node2 = NULL;
    pthread_t thread1, thread2;
    int thread1_started = 0, thread2_started = 0;
    struct MultiAddress* ma_peer1 = NULL;
    struct Libp2pVector* ma_vector2 = NULL;
    struct HashtableNode* node = NULL;

    libp2p_logger_add_class("daemon");
    libp2p_logger_add_class("null");

    // create peer 1
    drop_and_build_repository(ipfs_path, 4001, NULL, &peer_id_1);
    char multiaddress_string[255];
    sprintf(multiaddress_string, "/ip4/127.0.0.1/tcp/4001/ipfs/%s", peer_id_1);
    ma_peer1 = multiaddress_new_from_string(multiaddress_string);
    // start the daemon in a separate thread
    if (pthread_create(&thread1, NULL, test_routing_daemon_start, (void*)ipfs_path) < 0) {
        fprintf(stderr, "Unable to start thread 1\n");
        goto exit;
    }
    thread1_started = 1;

    // create peer 2
    ipfs_path = "/tmp/test2";
    os_utils_setenv("IPFS_PATH", ipfs_path, 1);
    // create a vector to hold peer1's multiaddress so we can connect as a peer
    ma_vector2 = libp2p_utils_vector_new(1);
    libp2p_utils_vector_add(ma_vector2, ma_peer1);
    // note: this destroys some things, as it frees the fs_repo:
    drop_and_build_repository(ipfs_path, 4002, ma_vector2, &peer_id_2);
    // add a file, to prime the connection to peer 1
    //TODO: Find a better way to do this...
    size_t bytes_written = 0;
    ipfs_node_online_new(ipfs_path, &local_node2);
    ipfs_import_file(NULL, "/home/parallels/ipfstest/hello_world.txt", &node, local_node2, &bytes_written, 0);
    ipfs_node_free(local_node2);
    // start the daemon in a separate thread
    if (pthread_create(&thread2, NULL, test_routing_daemon_start, (void*)ipfs_path) < 0) {
        fprintf(stderr, "Unable to start thread 2\n");
        goto exit;
    }
    thread2_started = 1;

    // wait for everything to start up
    sleep(3);

    retVal = 1;
    exit:
    ipfs_daemon_stop();
    if (peer_id_1 != NULL)
        free(peer_id_1);
    if (peer_id_2 != NULL)
        free(peer_id_2);
    if (thread1_started)
        pthread_join(thread1, NULL);
    if (thread2_started)
        pthread_join(thread2, NULL);
    if (ma_vector2 != NULL) {
        libp2p_utils_vector_free(ma_vector2);
    }
    if (node != NULL)
        ipfs_hashtable_node_free(node);
    if (ma_peer1 != NULL)
        multiaddress_free(ma_peer1);
    return retVal;
}

/***
 * Attempt to retrieve a file from a previously unknown node
 */
int test_routing_retrieve_file_third_party() {
    int retVal = 0;

    /*
    libp2p_logger_add_class("online");
    libp2p_logger_add_class("multistream");
    libp2p_logger_add_class("null");
    libp2p_logger_add_class("dht_protocol");
    libp2p_logger_add_class("providerstore");
    libp2p_logger_add_class("peerstore");
    libp2p_logger_add_class("exporter");
    libp2p_logger_add_class("peer");
    libp2p_logger_add_class("test_routing");
    */

    // clean out repository
    char* ipfs_path = "/tmp/test1";
    char* peer_id_1 = NULL, *peer_id_2 = NULL, *peer_id_3 = NULL;
    struct IpfsNode* ipfs_node2 = NULL, *ipfs_node3 = NULL;
    pthread_t thread1, thread2;
    int thread1_started = 0, thread2_started = 0;
    struct MultiAddress* ma_peer1 = NULL;
    struct Libp2pVector* ma_vector2 = NULL, *ma_vector3 = NULL;
    struct HashtableNode* node = NULL, *result_node = NULL;

    // create peer 1
    drop_and_build_repository(ipfs_path, 4001, NULL, &peer_id_1);
    char multiaddress_string[255];
    sprintf(multiaddress_string, "/ip4/127.0.0.1/tcp/4001/ipfs/%s", peer_id_1);
    ma_peer1 = multiaddress_new_from_string(multiaddress_string);
    // start the daemon in a separate thread
    libp2p_logger_debug("test_routing", "Firing up daemon 1.\n");
    if (pthread_create(&thread1, NULL, test_routing_daemon_start, (void*)ipfs_path) < 0) {
        fprintf(stderr, "Unable to start thread 1\n");
        goto exit;
    }
    thread1_started = 1;

    // wait for everything to start up
    sleep(3);

    // create peer 2
    ipfs_path = "/tmp/test2";
    // create a vector to hold peer1's multiaddress so we can connect as a peer
    ma_vector2 = libp2p_utils_vector_new(1);
    libp2p_utils_vector_add(ma_vector2, ma_peer1);
    // note: this destroys some things, as it frees the fs_repo:
    drop_and_build_repository(ipfs_path, 4002, ma_vector2, &peer_id_2);
    multiaddress_free(ma_peer1);
    // add a file, to prime the connection to peer 1
    //TODO: Find a better way to do this...
    size_t bytes_written = 0;
    if (!ipfs_node_online_new(ipfs_path, &ipfs_node2))
        goto exit;
    ipfs_node2->routing->Bootstrap(ipfs_node2->routing);
    ipfs_import_file(NULL, "/home/parallels/ipfstest/hello_world.txt", &node, ipfs_node2, &bytes_written, 0);
    ipfs_node_free(ipfs_node2);
    // start the daemon in a separate thread
    libp2p_logger_debug("test_routing", "Firing up daemon 2.\n");
    if (pthread_create(&thread2, NULL, test_routing_daemon_start, (void*)ipfs_path) < 0) {
        fprintf(stderr, "Unable to start thread 2\n");
        goto exit;
    }
    thread2_started = 1;

    // wait for everything to start up
    sleep(3);

    libp2p_logger_debug("test_routing", "Firing up the 3rd client\n");
    // create my peer, peer 3
    ipfs_path = "/tmp/test3";
    ma_peer1 = multiaddress_new_from_string(multiaddress_string);
    ma_vector3 = libp2p_utils_vector_new(1);
    libp2p_utils_vector_add(ma_vector3, ma_peer1);
    drop_and_build_repository(ipfs_path, 4003, ma_vector3, &peer_id_3);
    multiaddress_free(ma_peer1);
    ipfs_node_online_new(ipfs_path, &ipfs_node3);

    ipfs_node3->routing->Bootstrap(ipfs_node3->routing);

    if (!ipfs_exporter_get_node(ipfs_node3, node->hash, node->hash_size, &result_node)) {
        fprintf(stderr, "Get_Node returned false\n");
        goto exit;
    }

    if (node->hash_size != result_node->hash_size) {
        fprintf(stderr, "Node hash sizes do not match. Should be %lu but is %lu\n", node->hash_size, result_node->hash_size);
        goto exit;
    }

    if (node->data_size != result_node->data_size) {
        fprintf(stderr, "Result sizes do not match. Should be %lu but is %lu\n", node->data_size, result_node->data_size);
        goto exit;
    }

    retVal = 1;
    exit:
    ipfs_daemon_stop();
    if (thread1_started)
        pthread_join(thread1, NULL);
    if (thread2_started)
        pthread_join(thread2, NULL);
    if (ipfs_node3 != NULL)
        ipfs_node_free(ipfs_node3);
    if (peer_id_1 != NULL)
        free(peer_id_1);
    if (peer_id_2 != NULL)
        free(peer_id_2);
    if (peer_id_3 != NULL)
        free(peer_id_3);
    if (ma_vector2 != NULL) {
        libp2p_utils_vector_free(ma_vector2);
    }
    if (ma_vector3 != NULL) {
        libp2p_utils_vector_free(ma_vector3);
    }
    if (node != NULL)
        ipfs_hashtable_node_free(node);
    if (result_node != NULL)
        ipfs_hashtable_node_free(result_node);
    return retVal;
}

/***
 * Attempt to retrieve a large file from a previously unknown node
 */
int test_routing_retrieve_large_file() {
    int retVal = 0;

    /*
    libp2p_logger_add_class("multistream");
    libp2p_logger_add_class("null");
    libp2p_logger_add_class("dht_protocol");
    libp2p_logger_add_class("providerstore");
    libp2p_logger_add_class("peerstore");
    libp2p_logger_add_class("peer");
    libp2p_logger_add_class("test_routing");
    */

    libp2p_logger_add_class("exporter");
    libp2p_logger_add_class("online");

    // clean out repository
    char* ipfs_path = "/tmp/test1";
    char* peer_id_1 = NULL, *peer_id_2 = NULL, *peer_id_3 = NULL;
    struct IpfsNode* ipfs_node2 = NULL, *ipfs_node3 = NULL;
    pthread_t thread1, thread2;
    int thread1_started = 0, thread2_started = 0;
    struct MultiAddress* ma_peer1 = NULL;
    struct Libp2pVector* ma_vector2 = NULL, *ma_vector3 = NULL;
    struct HashtableNode* node = NULL, *result_node = NULL;
    FILE *fd;
    char* temp_file_name = "/tmp/largefile.tmp";

    unlink(temp_file_name);

    // create peer 1
    drop_and_build_repository(ipfs_path, 4001, NULL, &peer_id_1);
    char multiaddress_string[255];
    sprintf(multiaddress_string, "/ip4/127.0.0.1/tcp/4001/ipfs/%s", peer_id_1);
    ma_peer1 = multiaddress_new_from_string(multiaddress_string);
    // start the daemon in a separate thread
    libp2p_logger_debug("test_routing", "Firing up daemon 1.\n");
    if (pthread_create(&thread1, NULL, test_routing_daemon_start, (void*)ipfs_path) < 0) {
        fprintf(stderr, "Unable to start thread 1\n");
        goto exit;
    }
    thread1_started = 1;

    // wait for everything to start up
    sleep(3);

    // create peer 2
    ipfs_path = "/tmp/test2";
    // create a vector to hold peer1's multiaddress so we can connect as a peer
    ma_vector2 = libp2p_utils_vector_new(1);
    libp2p_utils_vector_add(ma_vector2, ma_peer1);
    // note: this destroys some things, as it frees the fs_repo:
    drop_and_build_repository(ipfs_path, 4002, ma_vector2, &peer_id_2);
    multiaddress_free(ma_peer1);
    // add a file, to prime the connection to peer 1
    //TODO: Find a better way to do this...
    size_t bytes_written = 0;
    if (!ipfs_node_online_new(ipfs_path, &ipfs_node2))
        goto exit;
    ipfs_node2->routing->Bootstrap(ipfs_node2->routing);
    ipfs_import_file(NULL, "/home/parallels/ipfstest/test_import_large.tmp", &node, ipfs_node2, &bytes_written, 0);
    ipfs_node_free(ipfs_node2);
    // start the daemon in a separate thread
    libp2p_logger_debug("test_routing", "Firing up daemon 2.\n");
    if (pthread_create(&thread2, NULL, test_routing_daemon_start, (void*)ipfs_path) < 0) {
        fprintf(stderr, "Unable to start thread 2\n");
        goto exit;
    }
    thread2_started = 1;

    // wait for everything to start up
    sleep(3);

    // see if we get the entire file
    libp2p_logger_debug("test_routing", "Firing up the 3rd client\n");
    // create my peer, peer 3
    ipfs_path = "/tmp/test3";
    ma_peer1 = multiaddress_new_from_string(multiaddress_string);
    ma_vector3 = libp2p_utils_vector_new(1);
    libp2p_utils_vector_add(ma_vector3, ma_peer1);
    drop_and_build_repository(ipfs_path, 4003, ma_vector3, &peer_id_3);
    multiaddress_free(ma_peer1);
    ipfs_node_online_new(ipfs_path, &ipfs_node3);

    ipfs_node3->routing->Bootstrap(ipfs_node3->routing);

    fd = fopen(temp_file_name, "w+");
    ipfs_exporter_object_cat_to_file(ipfs_node3, node->hash, node->hash_size, fd);
    fclose(fd);

    // the imported test file is expected to be exactly 1,000,000 bytes
    struct stat buf;
    stat(temp_file_name, &buf);

    if (buf.st_size != 1000000) {
        fprintf(stderr, "File size should be 1000000, but is %lu\n", buf.st_size);
        goto exit;
    }

    retVal = 1;
    exit:
    ipfs_daemon_stop();
    if (thread1_started)
        pthread_join(thread1, NULL);
    if (thread2_started)
        pthread_join(thread2, NULL);
    if (ipfs_node3 != NULL)
        ipfs_node_free(ipfs_node3);
    if (peer_id_1 != NULL)
        free(peer_id_1);
    if (peer_id_2 != NULL)
        free(peer_id_2);
    if (peer_id_3 != NULL)
        free(peer_id_3);
    if (ma_vector2 != NULL) {
        libp2p_utils_vector_free(ma_vector2);
    }
    if (ma_vector3 != NULL) {
        libp2p_utils_vector_free(ma_vector3);
    }
    if (node != NULL)
        ipfs_hashtable_node_free(node);
    if (result_node != NULL)
        ipfs_hashtable_node_free(result_node);
    return retVal;
}