debugging daemon
parent 6c64b55176
commit 8feb946087
10 changed files with 161 additions and 24 deletions
Makefile
@ -3,6 +3,7 @@ DEBUG = true
export DEBUG

all:
	cd ../c-libp2p; make all;
	cd blocks; make all;
	cd cid; make all;
	cd cmd; make all;

@ -30,6 +30,27 @@ void *ipfs_bootstrap_swarm(void* param) {
    return (void*)1;
}

/***
 * Announce to the network all of the files that I have in storage
 * @param local_node the context
 */
void ipfs_bootstrap_announce_files(struct IpfsNode* local_node) {
    struct Datastore* db = local_node->repo->config->datastore;
    if (!db->datastore_cursor_open(db))
        return;
    unsigned char* key = NULL;
    int key_size = 0;
    enum DatastoreCursorOp op = CURSOR_FIRST;
    while (db->datastore_cursor_get(&key, &key_size, NULL, 0, op, db)) {
        local_node->routing->Provide(local_node->routing, (char*)key, key_size);
        // TODO announce the file
        op = CURSOR_NEXT;
        free(key);
    }

    return;
}

/***
 * Listen for connections on the API port (usually 5001)
 * NOTE: This fills in the IpfsNode->routing struct

@ -40,5 +61,6 @@ void *ipfs_bootstrap_swarm(void* param) {
void *ipfs_bootstrap_routing(void* param) {
    struct IpfsNode* local_node = (struct IpfsNode*)param;
    local_node->routing = ipfs_routing_new_kademlia(local_node, &local_node->identity->private_key, NULL);
    ipfs_bootstrap_announce_files(local_node);
    return (void*)2;
}

@ -32,6 +32,7 @@ int ipfs_daemon_start(char* repo_path) {
    struct IpfsNode local_node;
    local_node.mode = MODE_ONLINE;
    local_node.peerstore = libp2p_peerstore_new();
    local_node.providerstore = libp2p_providerstore_new();
    local_node.repo = fs_repo;
    local_node.identity = fs_repo->config->identity;

@ -47,12 +48,10 @@ int ipfs_daemon_start(char* repo_path) {
    }

    // create pthread for the API
    ipfs_bootstrap_routing(&local_node);
    /*
    //ipfs_bootstrap_routing(&local_node);
    if (pthread_create(&work_pths[count_pths++], NULL, ipfs_bootstrap_routing, &local_node)) {
        fprintf(stderr, "Error creating thread for routing\n");
    }
    */

    fprintf(stderr, "Daemon is ready\n");

@ -342,8 +342,8 @@ int ipfs_import_files(int argc, char** argv) {
    struct FileList* current = first;
    while (current != NULL) {
        struct Node* directory_entry = NULL;
        char* path;
        char* filename;
        char* path = NULL;
        char* filename = NULL;
        os_utils_split_filename(current->file_name, &path, &filename);
        size_t bytes_written = 0;
        retVal = ipfs_import_file(NULL, current->file_name, &directory_entry, fs_repo, &bytes_written, recursive);

@ -351,7 +351,8 @@ int ipfs_import_files(int argc, char** argv) {
        ipfs_import_print_node_results(directory_entry, filename);
        // cleanup
        ipfs_node_free(directory_entry);
        free(path);
        if (path != NULL)
            free(path);
        free(filename);
        current = current->next;
    }

@ -6,6 +6,8 @@

//const char* datastore_default_directory = "datastore";

enum DatastoreCursorOp { CURSOR_FIRST, CURSOR_NEXT };

struct Datastore {
    char* type;
    char* path;

@ -21,9 +23,15 @@ struct Datastore {
    int (*datastore_open)(int argc, char** argv, struct Datastore* datastore);
    int (*datastore_close)(struct Datastore* datastore);
    int (*datastore_put)(const unsigned char* key, size_t key_size, unsigned char* data, size_t data_length, const struct Datastore* datastore);
    int (*datastore_get)(const char* key, size_t key_size, unsigned char* data, size_t max_data_length, size_t* data_length, const struct Datastore* datastore);
    // a handle to the datastore "context" used by the datastore
    void* handle;
    int (*datastore_get)(const char* key, size_t key_size,
            unsigned char* data, size_t max_data_length, size_t* data_length,
            const struct Datastore* datastore);
    int (*datastore_cursor_open)(struct Datastore* datastore);
    int (*datastore_cursor_close)(struct Datastore* datastore);
    int (*datastore_cursor_get)(unsigned char** key, int* key_length, unsigned char** value, int* value_length, enum DatastoreCursorOp op, struct Datastore* datastore);
    // generic connection and status variables for the datastore
    void* handle; // a handle to the database
    void* cursor; // a current cursor
};

/***

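The cursor members added above give callers a backend-agnostic way to walk every record in the datastore: open a cursor, call datastore_cursor_get with CURSOR_FIRST and then CURSOR_NEXT until it returns 0, then close the cursor. A rough sketch of that sequence (illustration only, not part of this commit; iterate_datastore is a hypothetical helper and db is assumed to be an already-opened Datastore):

#include <stdlib.h> // free
// assumes the struct Datastore / enum DatastoreCursorOp declarations above are in scope

void iterate_datastore(struct Datastore* db) {
    unsigned char* key = NULL;
    unsigned char* value = NULL;
    int key_size = 0;
    int value_size = 0;
    if (!db->datastore_cursor_open(db))
        return;
    enum DatastoreCursorOp op = CURSOR_FIRST;
    while (db->datastore_cursor_get(&key, &key_size, &value, &value_size, op, db)) {
        // key and value come back as malloc'd copies, so the caller frees them
        free(key);
        free(value);
        op = CURSOR_NEXT;
    }
    db->datastore_cursor_close(db);
}

Passing NULL for the value arguments, as ipfs_bootstrap_announce_files does above, skips copying the values when only the keys are needed.
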
os/utils.c
@ -89,13 +89,18 @@ int os_utils_is_directory(const char* file_name) {
int os_utils_split_filename(const char* in, char** path, char** filename) {
    int len = strlen(in);
    char* pos = strrchr(in, '/');
    pos++;
    *path = (char*)malloc((pos - in) + 1);
    *filename = (char*)malloc(len - (pos-in) + 1);
    strncpy(*path, in, pos-in-1);
    (*path)[pos-in-1] = 0;
    strcpy(*filename, pos);
    (*filename)[len - (pos-in)] = 0;
    if (pos != NULL) {
        pos++;
        *path = (char*)malloc((pos - in) + 1);
        strncpy(*path, in, pos-in-1);
        (*path)[pos-in-1] = 0;
        *filename = (char*)malloc(len - (pos-in) + 1);
        strcpy(*filename, pos);
        (*filename)[len - (pos-in)] = 0;
    } else {
        *filename = (char*)malloc(len+1);
        strcpy(*filename, in);
    }
    return 1;
}

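The new NULL check matters because strrchr returns NULL when the input contains no '/'; in that branch *path is never assigned, which is why ipfs_import_files above now initializes path to NULL and tests it before freeing. A small illustration of both cases (not part of the patch; the file names are just examples):

char* path = NULL;      // stays untouched when the input has no '/'
char* filename = NULL;
os_utils_split_filename("test/test_file.txt", &path, &filename);
// path -> "test", filename -> "test_file.txt"
free(path);
free(filename);

path = NULL;
filename = NULL;
os_utils_split_filename("test_file.txt", &path, &filename);
// path is still NULL, filename -> "test_file.txt"
if (path != NULL)
    free(path);
free(filename);
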
|
@ -12,6 +12,11 @@
#include "lmdb.h"
#include "ipfs/repo/fsrepo/lmdb_datastore.h"

struct lmdb_trans_cursor {
    MDB_txn* transaction;
    MDB_cursor* cursor;
};

/***
 * retrieve a record from the database and put in a pre-sized buffer
 * @param key the key to look for

@ -164,6 +169,79 @@ int repo_fsrepo_lmdb_close(struct Datastore* datastore) {
    return 1;
}

int repo_fsrepo_lmdb_cursor_open(struct Datastore* datastore) {
    if (datastore->handle != NULL) {
        MDB_env* mdb_env = (MDB_env*)datastore->handle;
        MDB_dbi mdb_dbi;
        if (datastore->cursor == NULL) {
            datastore->cursor = malloc(sizeof(struct lmdb_trans_cursor));
            struct lmdb_trans_cursor* cursor = (struct lmdb_trans_cursor*)datastore->cursor;
            // open transaction
            if (mdb_txn_begin(mdb_env, NULL, 0, &cursor->transaction) != 0)
                return 0;
            MDB_txn* mdb_txn = (MDB_txn*)cursor->transaction;
            if (mdb_dbi_open(mdb_txn, NULL, MDB_DUPSORT, &mdb_dbi) != 0) {
                mdb_txn_commit(mdb_txn);
                return 0;
            }
            // open cursor
            if (mdb_cursor_open(mdb_txn, mdb_dbi, &cursor->cursor) != 0) {
                mdb_txn_commit(mdb_txn);
                return 0;
            }
            return 1;
        }
    }
    return 0;
}

int repo_fsrepo_lmdb_cursor_get(unsigned char** key, int* key_length,
        unsigned char** value, int* value_length,
        enum DatastoreCursorOp op, struct Datastore* datastore)
{
    if (datastore->cursor != NULL) {
        struct lmdb_trans_cursor* tc = (struct lmdb_trans_cursor*)datastore->cursor;
        MDB_val mdb_key;
        MDB_val mdb_value;
        MDB_cursor_op co = MDB_FIRST;
        if (op == CURSOR_FIRST)
            co = MDB_FIRST;
        else if (op == CURSOR_NEXT)
            co = MDB_NEXT;
        if (mdb_cursor_get(tc->cursor, &mdb_key, &mdb_value, co) != 0) {
            return 0;
        }
        *key = (unsigned char*)malloc(mdb_key.mv_size);
        memcpy(*key, mdb_key.mv_data, mdb_key.mv_size);
        *key_length = mdb_key.mv_size;
        if (value != NULL) { // don't do this if a null is passed in, time saver
            *value = (unsigned char*)malloc(mdb_value.mv_size);
            memcpy(*value, mdb_value.mv_data, mdb_value.mv_size);
            *value_length = mdb_value.mv_size;
        }
        return 1;
    }
    return 0;
}

/**
 * Close an existing cursor
 * @param datastore the context
 * @returns true(1) on success
 */
int repo_fsrepo_lmdb_cursor_close(struct Datastore* datastore) {
    if (datastore->cursor != NULL) {
        struct lmdb_trans_cursor* cursor = (struct lmdb_trans_cursor*)datastore->cursor;
        if (cursor->cursor != NULL) {
            mdb_cursor_close(cursor->cursor);
            mdb_txn_commit(cursor->transaction);
            free(cursor);
            return 1;
        }
        free(cursor);
    }
    return 0;
}

/***
 * Places the LMDB methods into the datastore's function pointers
 * @param datastore the datastore to fill

@ -174,6 +252,10 @@ int repo_fsrepo_lmdb_cast(struct Datastore* datastore) {
    datastore->datastore_close = &repo_fsrepo_lmdb_close;
    datastore->datastore_put = &repo_fsrepo_lmdb_put;
    datastore->datastore_get = &repo_fsrepo_lmdb_get;
    datastore->datastore_cursor_open = &repo_fsrepo_lmdb_cursor_open;
    datastore->datastore_cursor_get = &repo_fsrepo_lmdb_cursor_get;
    datastore->datastore_cursor_close = &repo_fsrepo_lmdb_cursor_close;
    datastore->cursor = NULL;
    return 1;
}

@ -36,7 +36,8 @@ int ipfs_routing_kademlia_get_value(struct s_ipfs_routing* routing, char* key, s

/**
 * Find a provider that can provide a particular key
 * NOTE: It will look locally before asking the network
 * NOTE: It will look locally before asking the network. So
 * if a peer announced, we already have an answer.
 *
 * @param routing the context
 * @param key the key to what we're looking for

@ -49,6 +50,8 @@ int ipfs_routing_kademlia_find_providers(struct s_ipfs_routing* routing, char* k
    *results = libp2p_utils_vector_new(1);
    struct Libp2pVector* vector = *results;
    // see if I can provide it
    // temporarily commented out for testing...
    /*
    unsigned char* peer_id = NULL;
    int peer_id_size = 0;
    if (libp2p_providerstore_get(routing->local_node->providerstore, (unsigned char*)key, key_size, &peer_id, &peer_id_size)) {

@ -62,9 +65,23 @@ int ipfs_routing_kademlia_find_providers(struct s_ipfs_routing* routing, char* k
            current = current->next;
        }
    }
    //TODO: get a list of providers that are closer
    */
    //get a list of providers that are closest
    if (vector->total == 0) {
        // ask the network
        // search requires null terminated key
        char* key_nt = malloc(key_size + 1);
        strncpy(key_nt, key, key_size);
        key_nt[key_size] = 0;
        struct MultiAddress** list = search_kademlia(key_nt, 3);
        free(key_nt);
        if (list != NULL) {
            int i = 0;
            while (list[i] != NULL) {
                struct MultiAddress* current = list[i];
                libp2p_utils_vector_add(vector, current);
                i++;
            }
        }
    }
    if (vector->total == 0) {
        // we were unable to find it, even on the network

@ -135,9 +152,7 @@ struct s_ipfs_routing* ipfs_routing_new_kademlia(struct IpfsNode* local_node, st
    // connect to nodes and listen for connections
    struct MultiAddress* address = multiaddress_new_from_string(local_node->repo->config->addresses->api);
    if (multiaddress_is_ip(address)) {
        int port = multiaddress_get_ip_port(address);
        int family = multiaddress_get_ip_family(address);
        start_kademlia(port, family, kademlia_id, 10);
        start_kademlia_multiaddress(address, kademlia_id, 10);
    }
    local_node->routing = routing;
    return routing;

@ -48,13 +48,13 @@ void* start_daemon(void* path) {

int test_routing_supernode_get_remote_value() {
    // a remote machine has a file. Let's see if we can get it.
    // the key is QmXQDbRPsbGdtvDwWvXVXb9TwUTNMcGNfWWnA9oSJJBi5n, which is the email.txt from jmjatlanta.com
    // the key is QmYAXgX8ARiriupMQsbGXtKdDyGzWry1YV3sycKw1qqmgH, which is the test_file.txt
    int retVal = 0;
    struct FSRepo* fs_repo = NULL;
    struct IpfsNode* ipfs_node = NULL;
    struct Libp2pPeer this_peer;
    struct Stream* stream = NULL;
    const unsigned char* orig_multihash = (unsigned char*)"QmXQDbRPsbGdtvDwWvXVXb9TwUTNMcGNfWWnA9oSJJBi5n";
    const unsigned char* orig_multihash = (unsigned char*)"QmYAXgX8ARiriupMQsbGXtKdDyGzWry1YV3sycKw1qqmgH";
    size_t hash_size = 100;
    unsigned char hash[hash_size];
    unsigned char* hash_ptr = &hash[0];

@ -86,8 +86,11 @@ int test_routing_supernode_get_remote_value() {
    this_peer.addr_head = libp2p_utils_linked_list_new();
    this_peer.addr_head->item = multiaddress_new_from_string("/ip4/127.0.0.1/tcp/4001");
    libp2p_peerstore_add_peer(ipfs_node->peerstore, &this_peer);
    // set a different port for the dht/kademlia stuff
    ipfs_node->repo->config->addresses->api = "/ip4/127.0.0.1/tcp/5002";
    ipfs_node->routing = ipfs_routing_new_kademlia(ipfs_node, &fs_repo->config->identity->private_key, stream);

    if (ipfs_node->routing == NULL)
        goto exit;

test/test_file.txt (new file)
@ -0,0 +1 @@
Hello, World!