Tested and made adjustments for large file transfers
parent 03696dd6e7
commit 43bf2caeff
5 changed files with 173 additions and 15 deletions
@@ -210,15 +210,16 @@ int ipfs_exporter_object_get(int argc, char** argv) {
 	return retVal;
 }
 
-int ipfs_exporter_cat_node(struct HashtableNode* node, struct IpfsNode* local_node) {
+int ipfs_exporter_cat_node(struct HashtableNode* node, struct IpfsNode* local_node, FILE *file) {
 	// process this node, then move on to the links
 
 	// build the unixfs
 	struct UnixFS* unix_fs;
 	ipfs_unixfs_protobuf_decode(node->data, node->data_size, &unix_fs);
 	for(size_t i = 0LU; i < unix_fs->bytes_size; i++) {
-		printf("%c", unix_fs->bytes[i]);
+		fprintf(file, "%c", unix_fs->bytes[i]);
 	}
 	ipfs_unixfs_free(unix_fs);
 	// process links
 	struct NodeLink* current = node->head_link;
 	while (current != NULL) {
@@ -227,7 +228,7 @@ int ipfs_exporter_cat_node(struct HashtableNode* node, struct IpfsNode* local_no
 		if (!ipfs_exporter_get_node(local_node, current->hash, current->hash_size, &child_node)) {
 			return 0;
 		}
-		ipfs_exporter_cat_node(child_node, local_node);
+		ipfs_exporter_cat_node(child_node, local_node, file);
 		ipfs_hashtable_node_free(child_node);
 		current = current->next;
 	}
@@ -235,8 +236,22 @@ int ipfs_exporter_cat_node(struct HashtableNode* node, struct IpfsNode* local_no
 	return 1;
 }
 
+int ipfs_exporter_object_cat_to_file(struct IpfsNode *local_node, unsigned char* hash, int hash_size, FILE* file) {
+	struct HashtableNode* read_node = NULL;
+
+	// find block
+	if (!ipfs_exporter_get_node(local_node, hash, hash_size, &read_node)) {
+		return 0;
+	}
+
+	int retVal = ipfs_exporter_cat_node(read_node, local_node, file);
+	ipfs_hashtable_node_free(read_node);
+	return retVal;
+}
+
 /***
- * Called from the command line with ipfs cat [hash]. Retrieves the object pointed to by hash, and displays its block data (links and data elements)
+ * Called from the command line with ipfs cat [hash]. Retrieves the object
+ * pointed to by hash, and displays its raw block data to the console
  * @param argc number of arguments
  * @param argv arguments
  * @returns true(1) on success
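The new entry point above factors the old cat logic so that any FILE* can be the sink; the next hunk rewires the command-line path to pass stdout. As a rough usage sketch outside this commit, writing an object to a path might look like the following; dump_object_to_path is a hypothetical name, and error handling follows the same 0/1 convention the exporter uses:

// Hypothetical caller of the new API (not in this commit). Assumes the
// exporter header is included and local_node points at an online node.
int dump_object_to_path(struct IpfsNode* local_node, unsigned char* hash,
		int hash_size, const char* path) {
	FILE* out = fopen(path, "w");
	if (out == NULL)
		return 0; // false(0) on failure, matching the exporter's convention
	int retVal = ipfs_exporter_object_cat_to_file(local_node, hash, hash_size, out);
	fclose(out);
	return retVal; // true(1) on success
}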
@@ -260,18 +275,9 @@ int ipfs_exporter_object_cat(int argc, char** argv) {
 		return 0;
 	}
 
-	// find block
-	struct HashtableNode* read_node = NULL;
-	if (ipfs_exporter_get_node(local_node, cid->hash, cid->hash_length, &read_node)) {
-		ipfs_cid_free(cid);
-		return 0;
-	}
-	// no longer need the cid
+	int retVal = ipfs_exporter_object_cat_to_file(local_node, cid->hash, cid->hash_length, stdout);
 	ipfs_cid_free(cid);
 
-	int retVal = ipfs_exporter_cat_node(read_node, local_node);
-	ipfs_hashtable_node_free(read_node);
-
 	return retVal;
 }
 
@@ -293,7 +293,14 @@ int ipfs_import_file(const char* root_dir, const char* fileName, struct Hashtabl
 	}
 
 	// notify the network
-	local_node->routing->Provide(local_node->routing, (*parent_node)->hash, (*parent_node)->hash_size);
+	struct HashtableNode *htn = *parent_node;
+	local_node->routing->Provide(local_node->routing, htn->hash, htn->hash_size);
+	// notify the network of the subnodes too
+	struct NodeLink *nl = htn->head_link;
+	while (nl != NULL) {
+		local_node->routing->Provide(local_node->routing, nl->hash, nl->hash_size);
+		nl = nl->next;
+	}
 
 	return 1;
 }
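Note that the hunk above announces the root hash and its first-level links only; chunks referenced by deeper links are not Provided. A small refactor sketch (an assumption, not part of this commit) that isolates the announcement logic, using only the fields visible in this diff:

// Hypothetical helper (not in this commit): announce a node and its direct
// links to the DHT. Reaching deeper chunks would require fetching each
// child node first, since a NodeLink carries only the hash.
static void provide_node_and_links(struct IpfsNode* local_node, struct HashtableNode* htn) {
	local_node->routing->Provide(local_node->routing, htn->hash, htn->hash_size);
	for (struct NodeLink* nl = htn->head_link; nl != NULL; nl = nl->next) {
		local_node->routing->Provide(local_node->routing, nl->hash, nl->hash_size);
	}
}

Factoring it out this way would also let the import and reprovide paths share one announcement routine.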
@@ -33,3 +33,13 @@ int ipfs_exporter_object_get(int argc, char** argv);
  * @returns true(1) on success
  */
 int ipfs_exporter_object_cat(int argc, char** argv);
+
+/**
+ * Retrieves the object pointed to by hash and displays the raw data
+ * @param local_node the local node
+ * @param hash the hash to use
+ * @param hash_size the length of the hash
+ * @param file the file descriptor to write to
+ * @returns true(1) on success, false(0) otherwise
+ */
+int ipfs_exporter_object_cat_to_file(struct IpfsNode *local_node, unsigned char* hash, int hash_size, FILE* file);
@@ -1,4 +1,5 @@
 #include <pthread.h>
+#include <fcntl.h>
 
 #include "libp2p/os/utils.h"
 #include "libp2p/utils/logger.h"
@@ -450,3 +451,135 @@ int test_routing_retrieve_file_third_party() {
 	return retVal;
 
 }
+
+/***
+ * Attempt to retrieve a large file from a previously unknown node
+ */
+int test_routing_retrieve_large_file() {
+	int retVal = 0;
+
+	/*
+	libp2p_logger_add_class("multistream");
+	libp2p_logger_add_class("null");
+	libp2p_logger_add_class("dht_protocol");
+	libp2p_logger_add_class("providerstore");
+	libp2p_logger_add_class("peerstore");
+	libp2p_logger_add_class("peer");
+	libp2p_logger_add_class("test_routing");
+	*/
+
+	libp2p_logger_add_class("exporter");
+	libp2p_logger_add_class("online");
+
+	// clean out repository
+	char* ipfs_path = "/tmp/test1";
+	char* peer_id_1 = NULL, *peer_id_2 = NULL, *peer_id_3 = NULL;
+	struct IpfsNode* ipfs_node2 = NULL, *ipfs_node3 = NULL;
+	pthread_t thread1, thread2;
+	int thread1_started = 0, thread2_started = 0;
+	struct MultiAddress* ma_peer1 = NULL;
+	struct Libp2pVector* ma_vector2 = NULL, *ma_vector3 = NULL;
+	struct HashtableNode* node = NULL, *result_node = NULL;
+	FILE *fd;
+	char* temp_file_name = "/tmp/largefile.tmp";
+
+	unlink(temp_file_name);
+
+	// create peer 1
+	drop_and_build_repository(ipfs_path, 4001, NULL, &peer_id_1);
+	char multiaddress_string[255];
+	sprintf(multiaddress_string, "/ip4/127.0.0.1/tcp/4001/ipfs/%s", peer_id_1);
+	ma_peer1 = multiaddress_new_from_string(multiaddress_string);
+	// start the daemon in a separate thread
+	libp2p_logger_debug("test_routing", "Firing up daemon 1.\n");
+	if (pthread_create(&thread1, NULL, test_routing_daemon_start, (void*)ipfs_path) < 0) {
+		fprintf(stderr, "Unable to start thread 1\n");
+		goto exit;
+	}
+	thread1_started = 1;
+
+	// wait for everything to start up
+	// JMJ debugging =
+	sleep(3);
+
+	// create peer 2
+	ipfs_path = "/tmp/test2";
+	// create a vector to hold peer1's multiaddress so we can connect as a peer
+	ma_vector2 = libp2p_utils_vector_new(1);
+	libp2p_utils_vector_add(ma_vector2, ma_peer1);
+	// note: this destroys some things, as it frees the fs_repo_3:
+	drop_and_build_repository(ipfs_path, 4002, ma_vector2, &peer_id_2);
+	// add a file, to prime the connection to peer 1
+	//TODO: Find a better way to do this...
+	size_t bytes_written = 0;
+	if (!ipfs_node_online_new(ipfs_path, &ipfs_node2))
+		goto exit;
+	ipfs_node2->routing->Bootstrap(ipfs_node2->routing);
+	ipfs_import_file(NULL, "/home/parallels/ipfstest/test_import_large.tmp", &node, ipfs_node2, &bytes_written, 0);
+	ipfs_node_free(ipfs_node2);
+	// start the daemon in a separate thread
+	libp2p_logger_debug("test_routing", "Firing up daemon 2.\n");
+	if (pthread_create(&thread2, NULL, test_routing_daemon_start, (void*)ipfs_path) < 0) {
+		fprintf(stderr, "Unable to start thread 2\n");
+		goto exit;
+	}
+	thread2_started = 1;
+
+	// wait for everything to start up
+	// JMJ debugging =
+	sleep(3);
+
+	// see if we get the entire file
+	libp2p_logger_debug("test_routing", "Firing up the 3rd client\n");
+	// create my peer, peer 3
+	ipfs_path = "/tmp/test3";
+	ma_peer1 = multiaddress_new_from_string(multiaddress_string);
+	ma_vector3 = libp2p_utils_vector_new(1);
+	libp2p_utils_vector_add(ma_vector3, ma_peer1);
+	drop_and_build_repository(ipfs_path, 4003, ma_vector3, &peer_id_3);
+	ipfs_node_online_new(ipfs_path, &ipfs_node3);
+
+	ipfs_node3->routing->Bootstrap(ipfs_node3->routing);
+
+	fd = fopen(temp_file_name, "w+");
+	ipfs_exporter_object_cat_to_file(ipfs_node3, node->hash, node->hash_size, fd);
+	fclose(fd);
+
+	struct stat buf;
+	stat(temp_file_name, &buf);
+
+	if (buf.st_size != 1000000) {
+		fprintf(stderr, "File size should be 1000000, but is %lu\n", buf.st_size);
+		goto exit;
+	}
+
+	retVal = 1;
+	exit:
+	ipfs_daemon_stop();
+	if (thread1_started)
+		pthread_join(thread1, NULL);
+	if (thread2_started)
+		pthread_join(thread2, NULL);
+	if (ipfs_node3 != NULL)
+		ipfs_node_free(ipfs_node3);
+	if (peer_id_1 != NULL)
+		free(peer_id_1);
+	if (peer_id_2 != NULL)
+		free(peer_id_2);
+	if (peer_id_3 != NULL)
+		free(peer_id_3);
+	if (ma_vector2 != NULL) {
+		libp2p_utils_vector_free(ma_vector2);
+	}
+	if (ma_vector3 != NULL) {
+		libp2p_utils_vector_free(ma_vector3);
+	}
+	if (node != NULL)
+		ipfs_hashtable_node_free(node);
+	if (result_node != NULL)
+		ipfs_hashtable_node_free(result_node);
+	libp2p_logger_free();
+	return retVal;
+}
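The test imports a fixture from the hard-coded path /home/parallels/ipfstest/test_import_large.tmp and asserts that exactly 1000000 bytes come back through ipfs_exporter_object_cat_to_file. The commit does not show how that fixture is produced; a reproducible generator might look like this sketch, where write_test_fixture is a hypothetical helper:

#include <stdio.h>
#include <stdlib.h>

// Hypothetical fixture generator (not in this commit): writes `size`
// pseudo-random bytes to `path` so the exported copy can be size-checked.
static int write_test_fixture(const char* path, size_t size) {
	FILE* f = fopen(path, "wb");
	if (f == NULL)
		return 0;
	srand(42); // fixed seed keeps the fixture reproducible across runs
	for (size_t i = 0; i < size; i++)
		fputc(rand() % 256, f);
	fclose(f);
	return 1;
}

// e.g.: write_test_fixture("/home/parallels/ipfstest/test_import_large.tmp", 1000000);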
@@ -70,6 +70,7 @@ const char* names[] = {
 	"test_routing_supernode_get_value",
 	"test_routing_supernode_get_remote_value",
 	"test_routing_retrieve_file_third_party",
+	"test_routing_retrieve_large_file",
 	"test_unixfs_encode_decode",
 	"test_unixfs_encode_smallfile",
 	"test_ping",
@@ -118,6 +119,7 @@ int (*funcs[])(void) = {
 	test_routing_supernode_get_value,
 	test_routing_supernode_get_remote_value,
 	test_routing_retrieve_file_third_party,
+	test_routing_retrieve_large_file,
 	test_unixfs_encode_decode,
 	test_unixfs_encode_smallfile,
 	test_ping,
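The two hunks above keep names[] and funcs[] as parallel arrays, so the new test must be inserted at the same index in both. A minimal dispatcher over such arrays could look like the sketch below; this is an assumption for illustration, and the repository's actual runner may iterate differently:

#include <stdio.h>
#include <string.h>

// Sketch of a name-based dispatcher over the parallel arrays: find the
// index of `name` in names[] and invoke the matching entry of funcs[].
int run_test_by_name(const char* name) {
	int count = sizeof(names) / sizeof(names[0]);
	for (int i = 0; i < count; i++) {
		if (strcmp(names[i], name) == 0)
			return funcs[i](); // each test returns true(1) on success
	}
	fprintf(stderr, "No test named %s\n", name);
	return 0;
}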