refactoring + beginning of file transfer

Now attempting to use kademlia to find a hash, and NodeIO to transfer the file.

parent 93c4988f90
commit e8b8d06f24

20 changed files with 357 additions and 90 deletions
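The retrieval path this commit is working toward, pieced together from the new test in test/routing/test_supernode.h further down, looks roughly like the sketch below. It is only an illustration: error handling is omitted, and the buffer size and the ip, port, and base58Hash variables stand in for values the test derives elsewhere.

    // 1. ask the DHT which peers can provide the hash
    size_t results_size = 2048;
    char results[2048];
    ipfs_node->routing->FindProviders(ipfs_node->routing, (char*)base58Hash, 100, results, &results_size);

    // 2. turn the raw answer into multiaddresses and connect to one provider
    struct Libp2pLinkedList* providers = NULL;
    ipfs_routing_supernode_parse_provider((unsigned char*)results, &providers);
    struct Stream* stream = libp2p_net_multistream_connect(ip, port);

    // 3. switch the stream to the NodeIO protocol and pull the node across
    libp2p_net_multistream_upgrade(stream, "/NodeIO/1.0.0");
    struct Node* node = libp2p_nodeio_get(stream, base58Hash, 100);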
@@ -32,7 +32,7 @@ void *ipfs_null_connection (void *ptr)
     // TODO: multistream + secio + message.
     // TODO: when should we exit the for loop and disconnect?
 
-    struct SecureSession secure_session;
+    struct SessionContext secure_session;
     secure_session.insecure_stream = libp2p_net_multistream_stream_new(connection_param->socket);
     secure_session.default_stream = secure_session.insecure_stream;
 
@@ -18,7 +18,7 @@ int ipfs_ping (int argc, char **argv)
     size_t results_size = 0;
     int port = 0;
     char* ip = NULL;
-    struct SecureSession session;
+    struct SessionContext session;
 
     // read the configuration
     struct FSRepo* fs_repo;
@@ -178,6 +178,10 @@ int ipfs_import_print_node_results(const struct Node* node, const char* file_nam
 /**
  * Creates a node based on an incoming file or directory
  * NOTE: this can be called recursively for directories
+ * NOTE: When this function completes, parent_node will be either:
+ * 1) the complete file, in the case of a small file (<256k-ish)
+ * 2) a node with links to the various pieces of a large file
+ * 3) a node with links to files and directories if 'fileName' is a directory
  * @param root_dir the directory for where to look for the file
  * @param file_name the file (or directory) to import
  * @param parent_node the root node (has links to others in case this is a large file and is split)
@@ -1,30 +1,32 @@
 #ifndef DAEMON_H
 #define DAEMON_H
+
 #include <stdint.h>
 
 #define MAX 5
 #define CONNECTIONS 50
 
 struct null_connection_params {
     int socket;
     int *count;
     struct IpfsNode* local_node;
 };
 
 struct null_listen_params {
     uint32_t ipv4;
     uint16_t port;
 };
 
 struct IpfsNodeListenParams {
     uint32_t ipv4;
     uint16_t port;
     struct IpfsNode* local_node;
 };
 
 void *ipfs_null_connection (void *ptr);
 void *ipfs_null_listen (void *ptr);
 int ipfs_daemon (int argc, char **argv);
 int ipfs_daemon_start(char* repo_path);
 int ipfs_ping (int argc, char **argv);
+
 #endif // DAEMON_H
@@ -5,16 +5,21 @@
 #include "ipfs/repo/fsrepo/fs_repo.h"
 
 /**
- * Creates a node based on an incoming file
- * @param root the root directory
- * @param file_name the file to import (could contain a directory)
- * @param node the root node (could have links to others)
- * @param fs_repo the repo to use
- * @param bytes_written the number of bytes written to disk
- * @param recursive true(1) if you want to include files and directories
+ * Creates a node based on an incoming file or directory
+ * NOTE: this can be called recursively for directories
+ * NOTE: When this function completes, parent_node will be either:
+ * 1) the complete file, in the case of a small file (<256k-ish)
+ * 2) a node with links to the various pieces of a large file
+ * 3) a node with links to files and directories if 'fileName' is a directory
+ * @param root_dir the directory for where to look for the file
+ * @param file_name the file (or directory) to import
+ * @param parent_node the root node (has links to others in case this is a large file and is split)
+ * @param fs_repo the ipfs repository
+ * @param bytes_written number of bytes written to disk
+ * @param recursive true if we should navigate directories
  * @returns true(1) on success
  */
-int ipfs_import_file(const char* root, const char* fileName, struct Node** node, struct FSRepo* fs_repo, size_t* bytes_written, int recursive);
+int ipfs_import_file(const char* root, const char* fileName, struct Node** parent_node, struct FSRepo* fs_repo, size_t* bytes_written, int recursive);
 
 /**
  * called from the command line
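For reference, the renamed parent_node parameter is exercised later in this commit's supernode test roughly as follows (a sketch using the test's own names, with return-value handling trimmed):

    struct Node* write_node = NULL;
    size_t bytes_written = 0;
    // import /tmp/temp_file.bin; write_node receives the root node of the resulting DAG
    if (ipfs_import_file("/tmp", "temp_file.bin", &write_node, fs_repo, &bytes_written, 1) == 0) {
        // import failed
    }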
@@ -30,6 +30,7 @@ struct Node
     struct NodeLink* head_link;
     // not saved in protobuf
     unsigned char* encoded;
+    // a base32 representation of the multihash
     unsigned char* hash;
     size_t hash_size;
 };
@@ -1,10 +1,11 @@
 #ifndef addresses_h
 #define addresses_h
 
+#include "libp2p/utils/linked_list.h"
 #include "swarm.h"
 
 struct Addresses {
-    struct SwarmAddresses* swarm;
+    struct Libp2pLinkedList* swarm_head;
     char* api;
     char* gateway;
 };
@@ -34,17 +34,42 @@ struct s_ipfs_routing {
     int (*GetValue) (struct s_ipfs_routing*, char*, size_t, void**, size_t*);
     /**
      * Find a provider
+     * @param 1 the context
+     * @param 2 the information that is being looked for
+     * @param 3 the size of param 2
+     * @param 4 the information found
+     * @param 5 the size of the information found
      */
     int (*FindProviders) (struct s_ipfs_routing*, char*, size_t, void*, size_t*);
     /**
      * Find a peer
+     * @param 1 the context
+     * @param 2 the peer to look for
+     * @param 3 the size of the peer char array
+     * @param 4 the results
+     * @param 5 the size of the results
      */
     int (*FindPeer) (struct s_ipfs_routing*, char*, size_t, void*, size_t*);
-    int (*Provide) (struct s_ipfs_routing*, char*);
     /**
-     * Ping this instance
+     * Announce to the network that this host can provide this key
+     * @param 1 the context
+     * @param 2 the key
+     * @param 3 the key size
+     * @returns true(1) on success, otherwise false(0)
+     */
+    int (*Provide) (struct s_ipfs_routing*, char*, size_t);
+    /**
+     * Ping
+     * @param routing the context
+     * @param message the message
+     * @returns true(1) on success, otherwise false(0)
+     */
+    int (*Ping) (struct s_ipfs_routing*, struct Libp2pMessage*);
+    /**
+     * Get everything going
+     * @param routing the context
+     * @returns true(1) on success, otherwise false(0)
      */
-    int (*Ping) (struct s_ipfs_routing*, struct Libp2pMessage* message);
     int (*Bootstrap) (struct s_ipfs_routing*);
 };
 typedef struct s_ipfs_routing ipfs_routing;
@@ -59,3 +84,5 @@ ipfs_routing* ipfs_routing_new_kademlia(struct IpfsNode* local_node, struct RsaP
 int ipfs_routing_generic_put_value (ipfs_routing* offlineRouting, char *key, size_t key_size, void *val, size_t vlen);
 int ipfs_routing_generic_get_value (ipfs_routing* offlineRouting, char *key, size_t key_size, void **val, size_t *vlen);
 
+// supernode
+int ipfs_routing_supernode_parse_provider(const unsigned char* in, struct Libp2pLinkedList** multiaddresses);
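A minimal sketch of how the revised vtable is meant to be driven, based on the calls made in the new supernode test (key, key_size, and the surrounding node setup are assumed to exist):

    ipfs_routing* routing = ipfs_routing_new_kademlia(ipfs_node, &private_key, stream);

    // announce that this host can serve the key
    routing->Provide(routing, (char*)key, key_size);

    // later, ask the network who can provide it
    size_t results_size = 2048;
    char results[2048];
    routing->FindProviders(routing, (char*)key, key_size, results, &results_size);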
@@ -17,15 +17,18 @@ int repo_config_addresses_new(struct Addresses** addresses, char* api, char* gat
     if (*addresses == NULL)
         return 0;
 
+    struct Addresses* addr = *addresses;
     // allocate memory to store api and gateway
-    (*addresses)->api = alloc_and_copy(api);
-    (*addresses)->gateway = alloc_and_copy(gateway);
-    if ( (*addresses)->api == NULL || (*addresses)->gateway == NULL)
+    addr->api = alloc_and_copy(api);
+    addr->gateway = alloc_and_copy(gateway);
+    if ( addr->api == NULL || addr->gateway == NULL)
         return 0;
 
     // allocate memory for swarm_addresses
-    if (repo_config_swarm_address_new(&((*addresses)->swarm)) == 0)
-        return 0;
+    //if (repo_config_swarm_address_new(&((*addresses)->swarm)) == 0)
+    //    return 0;
+    // this is now allocated when it is filled
+    addr->swarm_head = NULL;
 
     return 1;
 }
@@ -33,7 +36,7 @@ int repo_config_addresses_new(struct Addresses** addresses, char* api, char* gat
 int repo_config_addresses_free(struct Addresses* addresses) {
     free(addresses->api);
     free(addresses->gateway);
-    repo_config_swarm_address_free(addresses->swarm);
+    libp2p_utils_linked_list_free(addresses->swarm_head);
     free(addresses);
     return 1;
 }
@@ -2,6 +2,7 @@
 #include <stdlib.h>
 #include <string.h>
 
+#include "libp2p/utils/linked_list.h"
 #include "ipfs/repo/config/config.h"
 #include "ipfs/os/utils.h"
 #include "ipfs/repo/config/bootstrap_peers.h"
@@ -96,10 +97,14 @@ int ipfs_repo_config_init(struct RepoConfig* config, unsigned int num_bits_for_k
         return 0;
 
     // swarm addresses
-    char** address_array = (char * []){ "/ip4/0.0.0.0/tcp/4001", "/ip6/::/tcp/4001" };
-    retVal = repo_config_swarm_address_init(config->addresses->swarm, address_array, 2);
-    if (retVal == 0)
-        return 0;
+    const char* addr1 = "/ip4/0.0.0.0/tcp/4001";
+    const char* addr2 = "/ip6/::/tcp/4001";
+    config->addresses->swarm_head = libp2p_utils_linked_list_new();
+    config->addresses->swarm_head->item = malloc(strlen(addr1) + 1);
+    strcpy(config->addresses->swarm_head->item, addr1);
+    config->addresses->swarm_head->next = libp2p_utils_linked_list_new();
+    config->addresses->swarm_head->next->item = malloc(strlen(addr2) + 1);
+    strcpy(config->addresses->swarm_head->next->item, addr2);
 
     config->discovery.mdns.enabled = 1;
     config->discovery.mdns.interval = 10;
@@ -67,12 +67,14 @@ int repo_config_write_config_file(char* full_filename, struct RepoConfig* config
     fprintf(out_file, " \"BloomFilterSize\": %d\n", config->datastore->bloom_filter_size);
     fprintf(out_file, " },\n \"Addresses\": {\n");
     fprintf(out_file, " \"Swarm\": [\n");
-    for(int i = 0; i < config->addresses->swarm->num_addresses; i++) {
-        fprintf(out_file, " \"%s\"", config->addresses->swarm->addresses[i]);
-        if (i != (config->addresses->swarm->num_addresses - 1))
-            fprintf(out_file, ",\n");
-        else
+    struct Libp2pLinkedList* current = config->addresses->swarm_head;
+    while (current != NULL) {
+        fprintf(out_file, " \"%s\"", (char*)current->item);
+        if (current->next == NULL)
             fprintf(out_file, "\n");
+        else
+            fprintf(out_file, ",\n");
+        current = current->next;
     }
     fprintf(out_file, " ],\n");
     fprintf(out_file, " \"API\": \"%s\",\n", config->addresses->api);
@@ -285,28 +287,41 @@ int _find_token(const char* data, const jsmntok_t* tokens, int tok_length, int s
  * @param tokens the array of tokens
  * @param tok_length the number of tokens
  * @param search_from start search from this token onward
- * @param tag what to search for
+ * @param tag what to search for (NOTE: If null, read from search_from)
  * @param result where to put the result. NOTE: allocates memory that must be freed
  * @returns true(1) on success
  */
 int _get_json_string_value(char* data, const jsmntok_t* tokens, int tok_length, int search_from, const char* tag, char** result) {
-    int pos = _find_token(data, tokens, tok_length, search_from, tag);
-    if (pos < 0)
+    int pos = 0;
+    jsmntok_t* curr_token = NULL;
+
+    if (tag == NULL) {
+        pos = search_from;
+        if (pos >= 0)
+            curr_token = (jsmntok_t*)&tokens[pos];
+    }
+    else {
+        pos = _find_token(data, tokens, tok_length, search_from, tag);
+        if (pos >= 0)
+            curr_token = (jsmntok_t*)&tokens[pos + 1];
+    }
+
+    if (curr_token == NULL)
         return 0;
-    jsmntok_t curr_token = tokens[pos+1];
-    if (curr_token.type == JSMN_PRIMITIVE) {
+
+    if (curr_token->type == JSMN_PRIMITIVE) {
         // a null
         *result = NULL;
     }
-    if (curr_token.type != JSMN_STRING)
+    if (curr_token->type != JSMN_STRING)
         return 0;
     // allocate memory
-    int str_len = curr_token.end - curr_token.start;
+    int str_len = curr_token->end - curr_token->start;
     *result = malloc(sizeof(char) * str_len + 1);
     if (*result == NULL)
         return 0;
     // copy in the string
-    strncpy(*result, &data[curr_token.start], str_len);
+    strncpy(*result, &data[curr_token->start], str_len);
     (*result)[str_len] = 0;
     return 1;
 }
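The new NULL-tag path is what the Swarm-array parsing in the next hunk relies on. Roughly (a sketch, with variable names borrowed from that hunk):

    // with a tag: locate "API" and copy the string token that follows it
    _get_json_string_value(data, tokens, num_tokens, curr_pos, "API", &api);

    // with tag == NULL: copy the token at position swarm_pos + i itself,
    // which is how each element of the Swarm array is read
    _get_json_string_value(data, tokens, num_tokens, swarm_pos + i, NULL, &item);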
@@ -400,6 +415,34 @@ int fs_repo_open_config(struct FSRepo* repo) {
     _get_json_int_value(data, tokens, num_tokens, curr_pos, "HashOnRead", &repo->config->datastore->hash_on_read);
     _get_json_int_value(data, tokens, num_tokens, curr_pos, "BloomFilterSize", &repo->config->datastore->bloom_filter_size);
 
+    // get addresses. First is Swarm array, then Api, then Gateway
+    curr_pos = _find_token(data, tokens, num_tokens, curr_pos, "Addresses");
+    if (curr_pos < 0) {
+        free(data);
+        return 0;
+    }
+    // get swarm addresses
+    int swarm_pos = _find_token(data, tokens, num_tokens, curr_pos, "Swarm") + 1;
+    if (tokens[swarm_pos].type != JSMN_ARRAY)
+        return 0;
+    int swarm_size = tokens[swarm_pos].size;
+    swarm_pos++;
+    repo->config->addresses->swarm_head = NULL;
+    struct Libp2pLinkedList* last = NULL;
+    for(int i = 0; i < swarm_size; i++) {
+        struct Libp2pLinkedList* current = libp2p_utils_linked_list_new();
+        if (!_get_json_string_value(data, tokens, num_tokens, swarm_pos + i, NULL, (char**)&current->item))
+            break;
+        if (repo->config->addresses->swarm_head == NULL) {
+            repo->config->addresses->swarm_head = current;
+        } else {
+            last->next = current;
+        }
+        last = current;
+    }
+    _get_json_string_value(data, tokens, num_tokens, curr_pos, "API", &repo->config->addresses->api);
+    _get_json_string_value(data, tokens, num_tokens, curr_pos, "Gateway", &repo->config->addresses->gateway);
+
     // free the memory used reading the json file
     free(data);
     free(priv_key_base64);
@@ -3,6 +3,8 @@
 
 /**
  * Routing using Kademlia and DHT
+ *
+ * The go version has "supernode" which is similar to this:
  */
 
 /**
@@ -32,8 +34,17 @@ int ipfs_routing_kademlia_get_value(struct s_ipfs_routing* routing, char* key, s
 
 /**
  * Find a provider
+ * @param routing the context
+ * @param key the key to what we're looking for
+ * @param key_size the size of the key
+ * @param results the results
+ * @param results_size the size of the results buffer
+ * @returns true(1) on success, otherwise false(0)
 */
-int ipfs_routing_kademlia_find_providers(struct s_ipfs_routing* routing, char* param1, size_t param2, void* param3, size_t* param4) {
+int ipfs_routing_kademlia_find_providers(struct s_ipfs_routing* routing, char* key, size_t key_size, void* results, size_t* results_size) {
+    // see if I can provide it
+    // add my multiaddress if I can
+    // get a list of providers that are closer
     return 0;
 }
 
@@ -43,7 +54,7 @@ int ipfs_routing_kademlia_find_providers(struct s_ipfs_routing* routing, char* p
 int ipfs_routing_kademlia_find_peer(struct s_ipfs_routing* routing, char* param1, size_t param2, void* param3, size_t* param4) {
     return 0;
 }
-int ipfs_routing_kademlia_provide(struct s_ipfs_routing* routing, char* param1) {
+int ipfs_routing_kademlia_provide(struct s_ipfs_routing* routing, char* param1, size_t param2) {
     return 0;
 }
 
@@ -61,6 +72,12 @@ int ipfs_routing_kademlia_bootstrap(struct s_ipfs_routing* routing) {
 }
 
 struct s_ipfs_routing* ipfs_routing_new_kademlia(struct IpfsNode* local_node, struct RsaPrivateKey* private_key, struct Stream* stream) {
+    char* kademlia_id = NULL;
+    // generate kademlia compatible id by getting last 20 chars of peer id
+    if (local_node->identity->peer_id == NULL || strlen(local_node->identity->peer_id) < 20) {
+        return NULL;
+    }
+    kademlia_id = &local_node->identity->peer_id[strlen(local_node->identity->peer_id)-20];
     struct s_ipfs_routing* routing = (struct s_ipfs_routing*)malloc(sizeof(struct s_ipfs_routing));
     if (routing != NULL) {
         routing->local_node = local_node;
@@ -79,8 +96,8 @@ struct s_ipfs_routing* ipfs_routing_new_kademlia(struct IpfsNode* local_node, st
         if (multiaddress_is_ip(address)) {
             int port = multiaddress_get_ip_port(address);
             int family = multiaddress_get_ip_family(address);
-            start_kademlia(port, family, local_node->identity->peer_id, 10);
+            start_kademlia(port, family, kademlia_id, 10);
         }
-
+    local_node->routing = routing;
     return routing;
 }
@@ -64,7 +64,7 @@ int ipfs_routing_offline_find_peer (ipfs_routing* offlineRouting, char *peer_id,
     return ErrOffline;
 }
 
-int ipfs_routing_offline_provide (ipfs_routing* offlineRouting, char *cid)
+int ipfs_routing_offline_provide (ipfs_routing* offlineRouting, char *cid, size_t cid_size)
 {
     return ErrOffline;
 }
@@ -14,7 +14,14 @@ int ipfs_routing_online_find_providers(struct s_ipfs_routing* routing, char* val
 int ipfs_routing_online_find_peer(struct s_ipfs_routing* routing, char* val1, size_t val2, void* val3, size_t* val4) {
     return 0;
 }
-int ipfs_routing_online_provide(struct s_ipfs_routing* routing, char* val1) {
+
+/**
+ * Notify the network that this host can provide this key
+ * @param routing information about this host
+ * @param val1 the key (hash) of the data
+ * @returns true(1) on success, otherwise false
+ */
+int ipfs_routing_online_provide(struct s_ipfs_routing* routing, char* val1, size_t val2) {
     return 0;
 }
 int ipfs_routing_online_ping(struct s_ipfs_routing* routing, struct Libp2pMessage* message) {
@@ -1,5 +1,6 @@
 #include <stdio.h>
 
+#include "../test_helper.h"
 #include "ipfs/importer/importer.h"
 #include "ipfs/importer/exporter.h"
 #include "ipfs/merkledag/merkledag.h"
@@ -7,27 +8,6 @@
 #include "mh/multihash.h"
 #include "libp2p/crypto/encoding/base58.h"
 
-/***
- * Helper to create a test file in the OS
- */
-int create_file(const char* fileName, unsigned char* bytes, size_t num_bytes) {
-    FILE* file = fopen(fileName, "wb");
-    fwrite(bytes, num_bytes, 1, file);
-    fclose(file);
-    return 1;
-}
-
-int create_bytes(unsigned char* buffer, size_t num_bytes) {
-    int counter = 0;
-
-    for(int i = 0; i < num_bytes; i++) {
-        buffer[i] = counter++;
-        if (counter > 15)
-            counter = 0;
-    }
-    return 1;
-}
-
 int test_import_large_file() {
     size_t bytes_size = 1000000; //1mb
     unsigned char file_bytes[bytes_size];
@@ -38,14 +38,14 @@ int test_repo_config_init() {
     if (retVal != 0)
         return 0;
 
-    if (repoConfig->addresses->swarm->num_addresses != 2)
+    if (repoConfig->addresses->swarm_head == NULL || repoConfig->addresses->swarm_head->next == NULL || repoConfig->addresses->swarm_head->next->next != NULL)
         return 0;
 
-    retVal = strncmp(repoConfig->addresses->swarm->addresses[0], "/ip4/0.0.0.0/tcp/4001", 21);
+    retVal = strcmp((char*)repoConfig->addresses->swarm_head->item, "/ip4/0.0.0.0/tcp/4001");
     if (retVal != 0)
         return 0;
 
-    retVal = strncmp(repoConfig->addresses->swarm->addresses[1], "/ip6/::/tcp/4001", 16);
+    retVal = strcmp((char*)repoConfig->addresses->swarm_head->next->item, "/ip6/::/tcp/4001");
     if (retVal != 0)
         return 0;
 
test/routing/test_supernode.h (new file, 129 lines)
@@ -0,0 +1,129 @@
+#include <stdlib.h>
+
+#include "../test_helper.h"
+#include "ipfs/routing/routing.h"
+#include "ipfs/repo/fsrepo/fs_repo.h"
+
+void stop_kademlia(void);
+
+int test_routing_supernode_start() {
+    int retVal = 0;
+    struct FSRepo* fs_repo = NULL;
+    struct IpfsNode* ipfs_node = NULL;
+    struct Stream* stream = NULL;
+
+    if (!drop_build_and_open_repo("/tmp/.ipfs", &fs_repo))
+        goto exit;
+
+    ipfs_node = (struct IpfsNode*)malloc(sizeof(struct IpfsNode));
+    ipfs_node->mode = MODE_ONLINE;
+    ipfs_node->identity = fs_repo->config->identity;
+    ipfs_node->repo = fs_repo;
+    ipfs_node->routing = ipfs_routing_new_kademlia(ipfs_node, &fs_repo->config->identity->private_key, stream);
+
+    if (ipfs_node->routing == NULL)
+        goto exit;
+
+    //TODO ping kademlia
+
+    retVal = 1;
+    exit:
+    if (ipfs_node->routing != NULL)
+        stop_kademlia();
+    return retVal;
+}
+
+int test_routing_supernode_get_value() {
+    int retVal = 0;
+    struct FSRepo* fs_repo = NULL;
+    struct IpfsNode* ipfs_node = NULL;
+    struct Stream* stream = NULL;
+    int file_size = 1000;
+    unsigned char bytes[file_size];
+    char* fileName = "temp_file.bin";
+    char* fullFileName = "/tmp/temp_file.bin";
+    struct Node* write_node = NULL;
+    size_t bytes_written = 0;
+    unsigned char base58Hash[100];
+    size_t results_size = 2048;
+    char results_buffer[results_size];
+
+    if (!drop_build_and_open_repo("/tmp/.ipfs", &fs_repo))
+        goto exit;
+
+    ipfs_node = (struct IpfsNode*)malloc(sizeof(struct IpfsNode));
+    ipfs_node->mode = MODE_ONLINE;
+    ipfs_node->identity = fs_repo->config->identity;
+    ipfs_node->repo = fs_repo;
+    ipfs_node->routing = ipfs_routing_new_kademlia(ipfs_node, &fs_repo->config->identity->private_key, stream);
+
+    if (ipfs_node->routing == NULL)
+        goto exit;
+
+    // create a file
+    create_bytes(&bytes[0], file_size);
+    create_file(fullFileName, bytes, file_size);
+
+    // write to ipfs
+    if (ipfs_import_file("/tmp", fileName, &write_node, fs_repo, &bytes_written, 1) == 0) {
+        goto exit;
+    }
+
+    if (!ipfs_node->routing->Provide(ipfs_node->routing, (char*)write_node->data, write_node->data_size))
+        goto exit;
+    // write_node->hash has the base32 key of the file. Convert this to a base58.
+    if (!ipfs_cid_hash_to_base58(write_node->hash, write_node->hash_size, base58Hash, 100))
+        goto exit;
+
+    // ask the network who can provide this
+    if (!ipfs_node->routing->FindProviders(ipfs_node->routing, (char*)base58Hash, 100, &results_buffer[0], &results_size))
+        goto exit;
+
+    // Q: What should FindProviders have in the results buffer? A: A struct of:
+    // 20 byte id
+    // 4 byte (or 16 byte for ip6) ip address
+    // 2 byte port number
+    // that means we have to attempt a connection and ask for peer ID
+    // TODO: Q: How do we determine ip4 vs ip6?
+
+    struct Libp2pLinkedList* multiaddress_head;
+    // get an IP4 ip and port
+    if (!ipfs_routing_supernode_parse_provider(&results_buffer, &multiaddress_head))
+        goto exit;
+
+    struct Libp2pLinkedList* current_address = multiaddress_head;
+    struct MultiAddress* addr = NULL;
+    while (current_address != NULL) {
+        addr = (struct Multiaddress*)current_address->item;
+        if (multiaddress_is_ip4(addr))
+            break;
+        addr = NULL;
+        current_address = current_address->next;
+    }
+
+    if (addr == NULL)
+        goto exit;
+
+    // Connect to server
+    char* ip;
+    multiaddress_get_ip_address(addr, &ip);
+    struct Stream* file_stream = libp2p_net_multistream_connect(ip, multiaddress_get_ip_port(addr));
+
+    // Switch from multistream to NodeIO
+    if (!libp2p_net_multistream_upgrade(file_stream, "/NodeIO/1.0.0"))
+        goto exit;
+
+    // Ask for file
+    struct Node* node = libp2p_nodeio_get(file_stream, base58Hash, 100);
+    if (node == NULL)
+        goto exit;
+
+    retVal = 1;
+    exit:
+    if (ipfs_node->routing != NULL)
+        stop_kademlia();
+    if (fs_repo != NULL)
+        ipfs_repo_fsrepo_free(fs_repo);
+    return retVal;
+
+}
@@ -6,6 +6,31 @@
 #include "ipfs/repo/fsrepo/fs_repo.h"
 #include "ipfs/os/utils.h"
 
+/***
+ * Helper to create a test file in the OS
+ */
+int create_file(const char* fileName, unsigned char* bytes, size_t num_bytes) {
+    FILE* file = fopen(fileName, "wb");
+    fwrite(bytes, num_bytes, 1, file);
+    fclose(file);
+    return 1;
+}
+
+/***
+ * Create a buffer with some data
+ */
+int create_bytes(unsigned char* buffer, size_t num_bytes) {
+    int counter = 0;
+
+    for(int i = 0; i < num_bytes; i++) {
+        buffer[i] = counter++;
+        if (counter > 15)
+            counter = 0;
+    }
+    return 1;
+}
+
+
 int remove_directory(const char *path)
 {
     DIR *d = opendir(path);
@@ -9,3 +9,18 @@
 int drop_and_build_repository(const char* dir);
 
 int drop_build_and_open_repo(const char* path, struct FSRepo** fs_repo);
+
+/***
+ * Helper to create a test file in the OS
+ * @pram fileName the resultant file name
+ * @param bytes the data that goes in the file
+ * @param num_bytes the size of the buffer
+ */
+int create_file(const char* fileName, unsigned char* bytes, size_t num_bytes);
+
+/***
+ * Create a buffer with some bytes
+ * @param buffer where to put the bytes
+ * @param num_bytes how much to fill of the buffer
+ */
+int create_bytes(unsigned char* buffer, size_t num_bytes);
@@ -9,6 +9,7 @@
 #include "repo/test_repo_config.h"
 #include "repo/test_repo_fsrepo.h"
 #include "repo/test_repo_identity.h"
+#include "routing/test_supernode.h"
 #include "storage/test_ds_helper.h"
 #include "storage/test_datastore.h"
 #include "storage/test_blocks.h"
@@ -36,6 +37,7 @@ const char* names[] = {
     "test_repo_config_identity_new",
     "test_repo_config_identity_private_key",
     "test_repo_fsrepo_write_read_block",
+    "test_routing_supernode_start",
     "test_get_init_command",
     "test_import_small_file",
     "test_import_large_file",
@@ -72,6 +74,7 @@ int (*funcs[])(void) = {
     test_repo_config_identity_new,
     test_repo_config_identity_private_key,
     test_repo_fsrepo_write_read_block,
+    test_routing_supernode_start,
     test_get_init_command,
     test_import_small_file,
     test_import_large_file,