More work on storage and cleanup
Added the flatfs datastore and fixed several memory leaks. Valgrind now reports 0 memory leaks across all tests.
parent 63c7bd72e2
commit 50ffade515
28 changed files with 580 additions and 93 deletions
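For orientation, a minimal sketch of how the new flatfs datastore added by this commit is meant to be called. Only the function names and signatures come from the new flatfs/flatfs.c and include/ipfs/flatfs/flatfs.h below; the datastore path, key, and value are made-up illustrations, and the helper has to declare ipfs_flatfs_put itself because the committed header does not.

#include <stdio.h>
#include <string.h>
#include "ipfs/flatfs/flatfs.h"

// As of this commit ipfs_flatfs_put is defined in flatfs/flatfs.c but not declared
// in include/ipfs/flatfs/flatfs.h, so a caller has to provide its own declaration.
int ipfs_flatfs_put(const char* datastore_path, const char* key, unsigned char* byte, size_t num_bytes);

int main() {
    // Illustrative values; a real caller would use its own datastore root and a hash-derived key.
    const char* datastore_path = "/tmp/.ipfs/datastore";
    const char* key = "/ABC123XYZ";
    unsigned char value[] = "hello flatfs";

    // Derive where the key would land on disk: <datastore>/<16-char prefix dir>/<name>.data
    char full_filename[256];
    if (ipfs_flatfs_get_full_filename(datastore_path, key, full_filename, sizeof(full_filename)) == 0)
        return 1;
    printf("%s maps to %s\n", key, full_filename);

    // Store the value; ipfs_flatfs_put writes a .tmp file first and then renames it into place.
    return ipfs_flatfs_put(datastore_path, key, value, strlen((char*)value)) ? 0 : 1;
}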
Makefile (2 lines changed)
@@ -10,6 +10,7 @@ all:
	cd multibase; make all;
	cd os; make all;
	cd repo; make all;
+	cd flatfs; make all;
	cd thirdparty; make all;
	cd test; make all;

@@ -21,6 +22,7 @@ clean:
	cd multibase; make clean;
	cd os; make clean;
	cd repo; make clean;
+	cd flatfs; make clean;
	cd thirdparty; make clean;
	cd test; make clean;
@@ -60,7 +60,7 @@ int do_init(FILE* out_file, char* repo_root, int empty, int num_bits_for_keypair
	if (fs_repo_is_initialized(repo_root))
		return 0;
	//TODO: If the conf is null, make one
-	if ( conf->identity.peer_id == NULL) {
+	if ( conf->identity->peer_id == NULL) {
		int retVal = repo_config_init(conf, num_bits_for_keypair, repo_root);
		if (retVal == 0)
			return 0;
@@ -4,6 +4,7 @@
#include "ipfs/commands/argument.h"

int commands_argument_free(struct Argument* argument) {
	free(argument);
	return 1;
}

@@ -17,6 +17,7 @@ int commands_command_init(struct Command* cmd) {
	// allocate memory for each argument
	for(int i = 0; i < cmd->argument_count; i++)
		cmd->arguments[i] = malloc(sizeof(struct Argument));

	// allocate memory for CommandOption array
	cmd->options = malloc(cmd->option_count * sizeof(struct CommandOption*));
	if (cmd->options == NULL)
@@ -32,6 +33,7 @@ int commands_command_free(struct Command* cmd) {
	for(int i = 0; i < cmd->argument_count; i++)
		commands_argument_free(cmd->arguments[i]);
+	free(cmd->arguments);

	//command options
	for(int i = 0; i < cmd->option_count; i++)
		commands_command_option_free(cmd->options[i]);
@@ -22,5 +22,6 @@ int commands_command_option_init(struct CommandOption* option, char* description

int commands_command_option_free(struct CommandOption* option) {
+	free(option->names);
	free(option);
	return 0;
}

flatfs/Makefile (new file, 19 lines)
@@ -0,0 +1,19 @@
CC = gcc

CFLAGS = -O0 -I../include -I../../c-libp2p/include

ifdef DEBUG
CFLAGS += -g3
endif

LFLAGS =
DEPS = ../include/flatfs/flatfs.h
OBJS = flatfs.o

%.o: %.c $(DEPS)
	$(CC) -c -o $@ $< $(CFLAGS)

all: $(OBJS)

clean:
	rm -f *.o
flatfs/flatfs.c (new file, 198 lines)
@@ -0,0 +1,198 @@
/**
 * a datastore implementation that stores all
 * objects in a 2 level directory structure in
 * the local file system, regardless of the
 * hierarchy of the keys. Modeled after go-ds-flatfs
 */

#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

#include "ipfs/os/utils.h"

#define FLATFS_MAX_PREFIX_LENGTH 16

/**
 * Helper (private) methods
 */

/**
 * remove beginning slash from string
 * @param in the filename to look at
 * @param out a place to store the results
 * @param the size of out
 * @returns true(1) on success
 */
int ipfs_flatfs_remove_preceeding_slash(const char* in, char* out, size_t max_size) {
	// make sure max_size is reasonable
	if (max_size < strlen(in) + 1)
		return 0;
	int pos = 0;
	while (in[pos] == '/')
		pos++;
	strncpy(out, &in[pos], strlen(in) - pos);
	out[strlen(in) - pos] = 0;
	return 1;
}

/**
 * Create a directory if it doesn't already exist
 * @param full_directory the full path
 * @returns true(1) on successful create or if it already exists and is writable. false(0) otherwise.
 */
int ipfs_flatfs_create_directory(const char* full_directory) {
	// shortcut
	if (os_utils_directory_writeable(full_directory))
		return 1;
	// is it there, just not writeable?
	if (os_utils_directory_exists(full_directory)) {
		return 0;
	}
	// it is not there, create it
	if (mkdir(full_directory, S_IRWXU) == -1)
		return 0;

	return 1;
}

/***
 * public methods
 */

/**
 * Given a filename (usually a long hash), derive a subdirectory name
 * @param datastore_path the path to the datastore
 * @param proposed_filename the filename to use
 * @param derived_path the complete pathname to the directory that should contain the proposed_filename
 * @param max_derived_path_length the maximum memory allocated for derived_path
 * @returns true(1) on success
 */
int ipfs_flatfs_get_directory(const char* datastore_path, const char* proposed_filename,
		char* derived_path, size_t max_derived_path_length) {
	// make sure max_derived_path_length is a reasonable number
	if (max_derived_path_length < strlen(datastore_path) + 17)
		return 0;

	// remove slash prefix if there is one
	char buffer[max_derived_path_length];
	int retVal = ipfs_flatfs_remove_preceeding_slash(proposed_filename, buffer, max_derived_path_length);
	if (retVal == 0)
		return 0;

	// make it 16 characters
	if (strlen(buffer) < 16) {
		int pos = strlen(buffer);
		int lacking = 16 - pos; // how many we should add
		memset(&buffer[strlen(buffer)], '_', lacking);
		buffer[pos + lacking] = 0;
	}
	// it may be too long, cut it
	if (strlen(buffer) > 16)
		buffer[16] = 0;
	retVal = os_utils_filepath_join(datastore_path, buffer, derived_path, max_derived_path_length);
	return retVal;
}

/**
 * Given the proposed filename, return the acutal filename on the disk (clean the name and add .data suffix)
 * @param proposed_filename the start
 * @param derived_filename the results
 * @param max_derived_filename_length the buffer size
 * @returns true(1) on success
 */
int ipfs_flatfs_get_filename(const char* proposed_filename, char* derived_filename, size_t max_derived_filename_length) {
	// get rid of slashes
	char buffer[max_derived_filename_length];
	int retVal = ipfs_flatfs_remove_preceeding_slash(proposed_filename, buffer, max_derived_filename_length);
	if (retVal == 0)
		return 0;

	// make sure we have space
	if (max_derived_filename_length < strlen(buffer) + 6) // ".data" plus terminating null
		return 0;

	// add the suffix
	strncat(buffer, ".data", 6);

	// put it in the result buffer
	strncpy(derived_filename, buffer, strlen(buffer) + 1);

	return 1;
}

/**
 * Combines the datastore path, the directory (derived from the filename itself), the proposed
 * filename, and the suffix (.data) to build a complete filename on the disk
 * @param datastore_path where the datastore is
 * @param proposed_filename the filename we want to use
 * @param derived_full_filename where the results will be put
 * @param max_derived_filename_length the size of memory allocated for "derived_full_filename"
 * @returns true(1) on success
 */
int ipfs_flatfs_get_full_filename(const char* datastore_path, const char* proposed_filename,
		char* derived_full_filename, size_t max_derived_filename_length) {
	// get rid of preceeding /
	char directory[max_derived_filename_length];
	int retVal = ipfs_flatfs_remove_preceeding_slash(proposed_filename, directory, max_derived_filename_length);
	if (retVal == 0)
		return 0;

	// start with the path
	retVal = ipfs_flatfs_get_directory(datastore_path, proposed_filename, directory, max_derived_filename_length);
	if (retVal == 0)
		return retVal;

	// now get the filename
	char actual_filename[max_derived_filename_length];
	retVal = ipfs_flatfs_get_filename(proposed_filename, actual_filename, max_derived_filename_length);
	if (retVal == 0)
		return 0;

	// now merge the two
	retVal = os_utils_filepath_join(directory, actual_filename, derived_full_filename, max_derived_filename_length);

	return retVal;
}


/**
 * Write a file given the key and the contents
 * @param datastore_path the root of the flatfs datastore
 * @param key the "filename"
 * @para byte the contents of the file as a byte array
 * @param num_bytes the length of the byte array
 */
int ipfs_flatfs_put(const char* datastore_path, const char* key, unsigned char* byte, size_t num_bytes) {
	size_t filename_length = strlen(datastore_path) + strlen(key) + 24; // subdirectory is 16, 2 slashes, .data suffix, terminating null
	// subdirectory
	char full_filename[filename_length];
	int retVal = ipfs_flatfs_get_directory(datastore_path, key, full_filename, filename_length);
	if (retVal == 0)
		return 0;
	retVal = ipfs_flatfs_create_directory(full_filename);
	if (retVal == 0)
		return 0;

	// filename
	retVal = ipfs_flatfs_get_full_filename(datastore_path, key, full_filename, filename_length);
	if (retVal == 0)
		return 0;

	//TODO: Error checking (i.e. too many open files

	// write temp file
	char temp_filename[filename_length + 5];
	strncpy(temp_filename, full_filename, strlen(full_filename) + 1);
	strcat(temp_filename, ".tmp");
	FILE* out = fopen(temp_filename, "w");
	size_t bytes_written = fwrite(byte, num_bytes, 1, out);
	fclose(out);

	// rename temp file to real name
	retVal = rename(temp_filename, full_filename);
	if (retVal != 0)
		return 0;

	return bytes_written == num_bytes;
}
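The three path helpers above compose as follows. This is a sketch of the expected behaviour, not extra API; the concrete values are the ones exercised by the new tests in test/flatfs/test_flatfs.h further down in this commit.

// Expected derivation for datastore "/tmp/" and key "/ABC123XYZ"
// (values taken from test/flatfs/test_flatfs.h):
//   directory : "/tmp/ABC123XYZ_______"                 leading '/' stripped, padded with '_' to 16 chars
//   filename  : "ABC123XYZ.data"                        ".data" suffix appended
//   full path : "/tmp/ABC123XYZ_______/ABC123XYZ.data"
char dir[256], name[256], full[256];
ipfs_flatfs_get_directory("/tmp/", "/ABC123XYZ", dir, sizeof(dir));
ipfs_flatfs_get_filename("/ABC123XYZ", name, sizeof(name));
ipfs_flatfs_get_full_filename("/tmp/", "/ABC123XYZ", full, sizeof(full));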
include/ipfs/flatfs/flatfs.h (new file, 38 lines)
@@ -0,0 +1,38 @@
/**
 * a datastore implementation that stores all
 * objects in a 2 level directory structure in
 * the local file system, regardless of the
 * hierarchy of the keys. Modeled after go-ds-flatfs
 */

/**
 * Given a filename (usually a long hash), derive a subdirectory name
 * @param datastore_path the path to the datastore
 * @param proposed_filename the filename to use
 * @param derived_path the complete pathname to the directory that should contain the proposed_filename
 * @param max_derived_path_length the maximum memory allocated for derived_path
 * @returns true(1) on success
 */
int ipfs_flatfs_get_directory(const char* datastore_path, const char* proposed_filename,
		char* derived_path, size_t max_derived_path_length);

/**
 * Given the proposed filename, return the acutal filename on the disk (clean the name and add .data suffix)
 * @param proposed_filename the start
 * @param derived_filename the results
 * @param max_derived_filename_length the buffer size
 * @returns true(1) on success
 */
int ipfs_flatfs_get_filename(const char* proposed_filename, char* derived_filename, size_t max_derived_filename_length);

/**
 * Combines the datastore path, the directory (derived from the filename itself), the proposed
 * filename, and the suffix (.data) to build a complete filename on the disk
 * @param datastore_path where the datastore is
 * @param proposed_filename the filename we want to use
 * @param derived_full_filename where the results will be put
 * @param max_derived_filename_length the size of memory allocated for "derived_full_filename"
 * @returns true(1) on success
 */
int ipfs_flatfs_get_full_filename(const char* datastore_path, const char* proposed_filename,
		char* derived_full_filename, size_t max_derived_filename_length);
@@ -36,4 +36,6 @@ int os_utils_file_size(const char* file_name);

int os_utils_directory_writeable(const char* path);

+int os_utils_directory_exists(const char* path);
+
#endif /* utils_h */
@@ -12,7 +12,7 @@
#include "swarm.h"

struct Addresses {
-	struct SwarmAddresses swarm;
+	struct SwarmAddresses* swarm;
	char* api;
	char* gateway;
};
@@ -24,10 +24,10 @@ struct Addresses {
 * @param gateway the gateway address (like "ip4/127.0.0.1/tcp/8080")
 * @returns true(1) on success, otherwise false(0)
 */
-int repo_config_addresses_init(struct Addresses* addresses, char* api, char* gateway);
+int repo_config_addresses_new(struct Addresses** addresses, char* api, char* gateway);

/**
- * clear any memory allocated by a address_init call
+ * clear any memory allocated by a address_new call
 * @param addresses the struct
 * @returns true(1)
 */
@@ -31,15 +31,15 @@ struct Reprovider {
};

struct RepoConfig {
-	struct Identity identity;
-	struct Datastore datastore;
-	struct Addresses addresses;
+	struct Identity* identity;
+	struct Datastore* datastore;
+	struct Addresses* addresses;
	struct Mounts mounts;
	struct Discovery discovery;
	struct Ipns ipns;
	struct BootstrapPeers peer_addresses;
	//struct tour tour;
-	struct Gateway gateway;
+	struct Gateway* gateway;
	//struct supernode_routing supernode_client_config;
	//struct api api;
	struct Reprovider reprovider;
@@ -28,9 +28,13 @@ struct Gateway {
	char* root_redirect;
	int writable;
	struct PathPrefixes path_prefixes;
-	struct HTTPHeaders http_headers;
+	struct HTTPHeaders* http_headers;
};

int repo_config_gateway_http_header_init(struct HTTPHeaders* http_headers, char** headers, char** values, int num_elements);

+int repo_config_gateway_new(struct Gateway** gateway);
+
+int repo_config_gateway_free(struct Gateway* gateway);
+
#endif /* gateway_h */
@@ -23,8 +23,9 @@ struct SwarmAddresses {
 */
int repo_config_swarm_address_init(struct SwarmAddresses* swarm_addresses, char** addresses, int array_length);

+int repo_config_swarm_address_new(struct SwarmAddresses** swarm_addresses);
/***
- * free up memory from repo_config_swarm_address_init
+ * free up memory from repo_config_swarm_address_new
 * @param swarm_addresses the structure
 * @returns true(1)
 */
@@ -50,6 +50,12 @@ int os_utils_file_exists(const char* file_name) {
	return 0;
}

+int os_utils_directory_exists(const char* directory_name) {
+	if (access(directory_name, F_OK) != -1)
+		return 1;
+	return 0;
+}
+
int os_utils_directory_writeable(const char* path) {
	int result = access(path, W_OK);
	return result == 0;
@@ -20,18 +20,28 @@ char* alloc_and_copy(char* source) {
	return result;
}

-int repo_config_addresses_init(struct Addresses* addresses, char* api, char* gateway) {
-	// allocate memory to store api and gateway
-	addresses->api = alloc_and_copy(api);
-	addresses->gateway = alloc_and_copy(gateway);
-	if (addresses->api == NULL || addresses->gateway == NULL)
+int repo_config_addresses_new(struct Addresses** addresses, char* api, char* gateway) {
+	*addresses = (struct Addresses*)malloc(sizeof(struct Addresses));
+	if (*addresses == NULL)
+		return 0;
+
+	// allocate memory to store api and gateway
+	(*addresses)->api = alloc_and_copy(api);
+	(*addresses)->gateway = alloc_and_copy(gateway);
+	if ( (*addresses)->api == NULL || (*addresses)->gateway == NULL)
		return 0;
+
+	// allocate memory for swarm_addresses
+	if (repo_config_swarm_address_new(&((*addresses)->swarm)) == 0)
+		return 0;
+
	return 1;
}

int repo_config_addresses_free(struct Addresses* addresses) {
	free(addresses->api);
	free(addresses->gateway);
-	repo_config_swarm_address_free(&(addresses->swarm));
+	repo_config_swarm_address_free(addresses->swarm);
+	free(addresses);
	return 1;
}
@@ -53,5 +53,6 @@ int repo_config_bootstrap_peers_free(struct BootstrapPeers* list) {
			free(list->peers[i]);
		}
	}
+	free(list->peers);
	return 1;
}
@@ -89,7 +89,7 @@ int repo_config_get_file_name(char* path, char** result) {
 */
int repo_config_init(struct RepoConfig* config, unsigned int num_bits_for_keypair, char* repo_path) {
	// identity
-	int retVal = repo_config_identity_init(&(config->identity), num_bits_for_keypair);
+	int retVal = repo_config_identity_init(config->identity, num_bits_for_keypair);
	if (retVal == 0)
		return 0;

@@ -99,17 +99,13 @@ int repo_config_init(struct RepoConfig* config, unsigned int num_bits_for_keypair
		return 0;

	// datastore
-	retVal = ipfs_repo_config_datastore_init(&(config->datastore), repo_path);
-	if (retVal == 0)
-		return 0;
-
-	retVal = repo_config_addresses_init(&(config->addresses), "/ip4/127.0.0.1/tcp/5001", "/ip4/127.0.0.1/tcp/8080");
+	retVal = ipfs_repo_config_datastore_init(config->datastore, repo_path);
	if (retVal == 0)
		return 0;

	// swarm addresses
	char** address_array = (char * []){ "/ip4/0.0.0.0/tcp/4001", "/ip6/::/tcp/4001" };
-	retVal = repo_config_swarm_address_init(&(config->addresses.swarm), address_array, 2);
+	retVal = repo_config_swarm_address_init(config->addresses->swarm, address_array, 2);
	if (retVal == 0)
		return 0;

@@ -123,15 +119,15 @@ int repo_config_init(struct RepoConfig* config, unsigned int num_bits_for_keypair

	config->reprovider.interval = "12h";

-	config->gateway.root_redirect = "";
-	config->gateway.writable = 0;
+	config->gateway->root_redirect = "";
+	config->gateway->writable = 0;

-	config->gateway.path_prefixes.num_elements = 0;
+	config->gateway->path_prefixes.num_elements = 0;

	// gateway http headers
	char** header_array = (char * []) { "Access-Control-Allow-Origin", "Access-Control-Allow-Methods", "Access-Control-Allow-Headers" };
	char** header_values = (char*[]) { "*", "GET", "X-Requested-With" };
-	retVal = repo_config_gateway_http_header_init(&(config->gateway.http_headers), header_array, header_values, 3);
+	retVal = repo_config_gateway_http_header_init(config->gateway->http_headers, header_array, header_values, 3);
	if (retVal == 0)
		return 0;

@@ -147,6 +143,28 @@ int ipfs_repo_config_new(struct RepoConfig** config) {
	*config = (struct RepoConfig*)malloc(sizeof(struct RepoConfig));
	if (*config == NULL)
		return 0;

+	// set initial values
+	(*config)->peer_addresses.num_peers = 0;
+	(*config)->peer_addresses.peers = NULL;
+
+	int retVal = 1;
+	retVal = repo_config_identity_new(&((*config)->identity));
+	if (retVal == 0)
+		return 0;
+
+	retVal = ipfs_repo_config_datastore_new(&((*config)->datastore));
+	if (retVal == 0)
+		return 0;
+
+	retVal = repo_config_addresses_new(&((*config)->addresses), "/ip4/127.0.0.1/tcp/5001", "/ip4/127.0.0.1/tcp/8080");
+	if (retVal == 0)
+		return 0;
+
+	retVal = repo_config_gateway_new(&((*config)->gateway));
+	if (retVal == 0)
+		return 0;
+
	return 1;
}

@@ -157,9 +175,11 @@ int ipfs_repo_config_new(struct RepoConfig** config) {
 */
int ipfs_repo_config_free(struct RepoConfig* config) {
	if (config != NULL) {
		repo_config_identity_free(config->identity);
		repo_config_bootstrap_peers_free(&(config->peer_addresses));
-		//ipfs_repo_config_datastore_free(&(config->datastore));
-		repo_config_addresses_free(&(config->addresses));
+		ipfs_repo_config_datastore_free(config->datastore);
+		repo_config_addresses_free(config->addresses);
+		repo_config_gateway_free(config->gateway);
+		free(config);
	}
	return 1;
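Taken together, the new constructor and the extended destructor above pair up roughly as follows. This is a minimal sketch assuming the call pattern used by the new test_repo_config_new test later in this commit; the wrapper function name, the assumed config.h include path, the 2048-bit keypair size, and the repo path are all illustrative.

#include "ipfs/repo/config/config.h"   // assumed header for RepoConfig; exact path is a guess

int build_default_config(const char* repo_path) {
	struct RepoConfig* config = NULL;
	// ipfs_repo_config_new allocates the RepoConfig plus its identity, datastore,
	// addresses and gateway members (all pointers after this commit).
	if (ipfs_repo_config_new(&config) == 0)
		return 0;
	// repo_config_init fills in the allocated members.
	if (repo_config_init(config, 2048, (char*)repo_path) == 0) {
		ipfs_repo_config_free(config);  // free on the error path so nothing leaks
		return 0;
	}
	// ... use config ...
	ipfs_repo_config_free(config);      // frees the nested structs and the config itself
	return 1;
}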
@@ -41,3 +41,33 @@ int repo_config_gateway_http_header_init(struct HTTPHeaders* http_headers, char*
	http_headers->num_elements = num_elements;
	return 1;
}
+
+int repo_config_gateway_new(struct Gateway** gateway) {
+	*gateway = (struct Gateway*)malloc(sizeof(struct Gateway));
+	if (*gateway == NULL)
+		return 0;
+	(*gateway)->http_headers = (struct HTTPHeaders*)malloc(sizeof(struct HTTPHeaders));
+	if ((*gateway)->http_headers == NULL) {
+		free(*gateway);
+		return 0;
+	}
+	(*gateway)->http_headers->num_elements = 0;
+	(*gateway)->http_headers->headers = NULL;
+	return 1;
+}
+
+int repo_config_gateway_free(struct Gateway* gateway) {
+	if (gateway->http_headers != NULL) {
+		for(int i = 0; i < gateway->http_headers->num_elements; i++) {
+			struct HTTPHeader* currHeader = gateway->http_headers->headers[i];
+			free(currHeader->header);
+			free(currHeader->value);
+			free(currHeader);
+		}
+		if (gateway->http_headers->headers != NULL)
+			free(gateway->http_headers->headers); // from init
+		free(gateway->http_headers); // from new
+	}
+	free(gateway); // from new
+	return 1;
+}
@@ -55,7 +55,8 @@ int repo_config_identity_init(struct Identity* identity, unsigned long num_bits_for_keypair
	if (!libp2p_crypto_rsa_generate_keypair( &(identity->private_key), num_bits_for_keypair))
		return 0;

-	repo_config_identity_build_peer_id(identity);
+	if (repo_config_identity_build_peer_id(identity) == 0)
+		return 0;

	return 1;
}
@@ -66,6 +67,9 @@ int repo_config_identity_new(struct Identity** identity) {
		return 0;

	memset(*identity, 0, sizeof(struct Identity));
+
+	(*identity)->peer_id = NULL;
+
	return 1;
}

@@ -75,6 +79,8 @@ int repo_config_identity_free(struct Identity* identity) {
		free(identity->private_key.public_key_der);
	if (identity->private_key.der != NULL)
		free(identity->private_key.der);
+	if (identity->peer_id != NULL)
+		free(identity->peer_id);
	free(identity);
	}
	return 1;
@@ -12,6 +12,7 @@

#include "ipfs/repo/config/swarm.h"


int repo_config_swarm_address_init(struct SwarmAddresses* swarm_addresses, char** addresses, int array_length) {
	// allocate memory for the addresses array
	swarm_addresses->addresses = malloc(sizeof(char*) * array_length);
@@ -33,10 +34,23 @@ int repo_config_swarm_address_init(struct SwarmAddresses* swarm_addresses, char*
	return 1;
}

+int repo_config_swarm_address_new(struct SwarmAddresses** swarm_addresses) {
+	*swarm_addresses = (struct SwarmAddresses*)malloc(sizeof(struct SwarmAddresses));
+	if (*swarm_addresses == NULL)
+		return 0;
+
+	(*swarm_addresses)->num_addresses = 0;
+	(*swarm_addresses)->addresses = NULL;
+	return 1;
+}
+
int repo_config_swarm_address_free(struct SwarmAddresses* swarm_addresses) {
+	if (swarm_addresses->addresses != NULL) {
		for (int i = 0; i < swarm_addresses->num_addresses; i++) {
			free(swarm_addresses->addresses[i]);
		}
		free(swarm_addresses->addresses);
+	}
+	free(swarm_addresses);
	return 1;
}
@@ -31,39 +31,39 @@ int repo_config_write_config_file(char* full_filename, struct RepoConfig* config

	fprintf(out_file, "{\n");
	fprintf(out_file, " \"Identity\": {\n");
-	fprintf(out_file, " \"PeerID\": \"%s\",\n", config->identity.peer_id);
+	fprintf(out_file, " \"PeerID\": \"%s\",\n", config->identity->peer_id);
	// print correct format of private key
	// first base 64 it
-	size_t encoded_size = libp2p_crypto_encoding_base64_encode_size(config->identity.private_key.der_length);
+	size_t encoded_size = libp2p_crypto_encoding_base64_encode_size(config->identity->private_key.der_length);
	unsigned char encoded_buffer[encoded_size + 1];
-	int retVal = libp2p_crypto_encoding_base64_encode(config->identity.private_key.der, config->identity.private_key.der_length, encoded_buffer, encoded_size, &encoded_size);
+	int retVal = libp2p_crypto_encoding_base64_encode(config->identity->private_key.der, config->identity->private_key.der_length, encoded_buffer, encoded_size, &encoded_size);
	if (retVal == 0)
		return 0;
	encoded_buffer[encoded_size] = 0;
	fprintf(out_file, " \"PrivKey\": \"%s\"\n", encoded_buffer);
	fprintf(out_file, " },\n");
	fprintf(out_file, " \"Datastore\": {\n");
-	fprintf(out_file, " \"Type\": \"%s\",\n", config->datastore.type);
-	fprintf(out_file, " \"Path\": \"%s\",\n", config->datastore.path);
-	fprintf(out_file, " \"StorageMax\": \"%s\",\n", config->datastore.storage_max);
-	fprintf(out_file, " \"StorageGCWatermark\": %d,\n", config->datastore.storage_gc_watermark);
-	fprintf(out_file, " \"GCPeriod\": \"%s\",\n", config->datastore.gc_period);
+	fprintf(out_file, " \"Type\": \"%s\",\n", config->datastore->type);
+	fprintf(out_file, " \"Path\": \"%s\",\n", config->datastore->path);
+	fprintf(out_file, " \"StorageMax\": \"%s\",\n", config->datastore->storage_max);
+	fprintf(out_file, " \"StorageGCWatermark\": %d,\n", config->datastore->storage_gc_watermark);
+	fprintf(out_file, " \"GCPeriod\": \"%s\",\n", config->datastore->gc_period);
	fprintf(out_file, " \"Params\": null,\n");
-	fprintf(out_file, " \"NoSync\": %s,\n", config->datastore.no_sync ? "true" : "false");
-	fprintf(out_file, " \"HashOnRead\": %s,\n", config->datastore.hash_on_read ? "true" : "false");
-	fprintf(out_file, " \"BloomFilterSize\": %d\n", config->datastore.bloom_filter_size);
+	fprintf(out_file, " \"NoSync\": %s,\n", config->datastore->no_sync ? "true" : "false");
+	fprintf(out_file, " \"HashOnRead\": %s,\n", config->datastore->hash_on_read ? "true" : "false");
+	fprintf(out_file, " \"BloomFilterSize\": %d\n", config->datastore->bloom_filter_size);
	fprintf(out_file, " },\n \"Addresses\": {\n");
	fprintf(out_file, " \"Swarm\": [\n");
-	for(int i = 0; i < config->addresses.swarm.num_addresses; i++) {
-		fprintf(out_file, " \"%s\"", config->addresses.swarm.addresses[i]);
-		if (i != (config->addresses.swarm.num_addresses - 1))
+	for(int i = 0; i < config->addresses->swarm->num_addresses; i++) {
+		fprintf(out_file, " \"%s\"", config->addresses->swarm->addresses[i]);
+		if (i != (config->addresses->swarm->num_addresses - 1))
			fprintf(out_file, ",\n");
		else
			fprintf(out_file, "\n");
	}
	fprintf(out_file, " ],\n");
-	fprintf(out_file, " \"API\": \"%s\",\n", config->addresses.api);
-	fprintf(out_file, " \"Gateway\": \"%s\"\n", config->addresses.gateway);
+	fprintf(out_file, " \"API\": \"%s\",\n", config->addresses->api);
+	fprintf(out_file, " \"Gateway\": \"%s\"\n", config->addresses->gateway);
	fprintf(out_file, " },\n \"Mounts\": {\n");
	fprintf(out_file, " \"IPFS\": \"%s\",\n", config->mounts.ipfs);
	fprintf(out_file, " \"IPNS\": \"%s\",\n", config->mounts.ipns);
@@ -86,15 +86,15 @@ int repo_config_write_config_file(char* full_filename, struct RepoConfig* config
	fprintf(out_file, " ],\n \"Tour\": {\n \"Last\": \"\"\n },\n");
	fprintf(out_file, " \"Gateway\": {\n");
	fprintf(out_file, " \"HTTPHeaders\": {\n");
-	for (int i = 0; i < config->gateway.http_headers.num_elements; i++) {
-		fprintf(out_file, " \"%s\": [\n \"%s\"\n ]", config->gateway.http_headers.headers[i]->header, config->gateway.http_headers.headers[i]->value);
-		if (i < config->gateway.http_headers.num_elements - 1)
+	for (int i = 0; i < config->gateway->http_headers->num_elements; i++) {
+		fprintf(out_file, " \"%s\": [\n \"%s\"\n ]", config->gateway->http_headers->headers[i]->header, config->gateway->http_headers->headers[i]->value);
+		if (i < config->gateway->http_headers->num_elements - 1)
			fprintf(out_file, ",\n");
		else
			fprintf(out_file, "\n },\n");
	}
-	fprintf(out_file, " \"RootRedirect\": \"%s\"\n", config->gateway.root_redirect);
-	fprintf(out_file, " \"Writable\": %s\n", config->gateway.writable ? "true" : "false");
+	fprintf(out_file, " \"RootRedirect\": \"%s\"\n", config->gateway->root_redirect);
+	fprintf(out_file, " \"Writable\": %s\n", config->gateway->writable ? "true" : "false");
	fprintf(out_file, " \"PathPrefixes\": []\n");
	fprintf(out_file, " },\n \"SupernodeRouting\": {\n");
	fprintf(out_file, " \"Servers\": null\n },");
@@ -120,9 +120,12 @@ int ipfs_repo_fsrepo_new(char* repo_path, struct RepoConfig* config, struct FSRe
		char* home_dir = os_utils_get_homedir();
		char* default_subdir = "/.ipfs";
		unsigned long newPathLen = strlen(home_dir) + strlen(default_subdir) + 2; // 1 for slash and 1 for end
-		char* newPath = malloc(sizeof(char) * newPathLen);
-		os_utils_filepath_join(os_utils_get_homedir(), default_subdir, newPath, newPathLen);
-		(*repo)->path = newPath;
+		(*repo)->path = malloc(sizeof(char) * newPathLen);
+		if ((*repo)->path == NULL) {
+			free( (*repo));
+			return 0;
+		}
+		os_utils_filepath_join(os_utils_get_homedir(), default_subdir, (*repo)->path, newPathLen);
	} else {
		int len = strlen(repo_path) + 1;
		(*repo)->path = (char*)malloc(len);
@@ -152,8 +155,11 @@ int ipfs_repo_fsrepo_new(char* repo_path, struct RepoConfig* config, struct FSRe
 */
int ipfs_repo_fsrepo_free(struct FSRepo* repo) {
	if (repo != NULL) {
+		if (repo->path != NULL)
+			free(repo->path);
+		if (repo->config != NULL)
			ipfs_repo_config_free(repo->config);
		if (repo->data_store != NULL)
			ipfs_repo_config_datastore_free(repo->data_store);
		free(repo);
	}
@@ -349,11 +355,11 @@ int fs_repo_open_config(struct FSRepo* repo) {
		return 0;
	}
	// the next should be the array, then string "PeerID"
-	_get_json_string_value(data, tokens, num_tokens, curr_pos, "PeerID", &repo->config->identity.peer_id);
+	_get_json_string_value(data, tokens, num_tokens, curr_pos, "PeerID", &repo->config->identity->peer_id);
	char* priv_key_base64;
	// then PrivKey
	_get_json_string_value(data, tokens, num_tokens, curr_pos, "PrivKey", &priv_key_base64);
-	retVal = repo_config_identity_build_private_key(&repo->config->identity, priv_key_base64);
+	retVal = repo_config_identity_build_private_key(repo->config->identity, priv_key_base64);
	if (retVal == 0) {
		free(data);
		free(priv_key_base64);
@@ -361,15 +367,15 @@ int fs_repo_open_config(struct FSRepo* repo) {
	}
	// now the datastore
	int datastore_position = _find_token(data, tokens, num_tokens, 0, "Datastore");
-	_get_json_string_value(data, tokens, num_tokens, curr_pos, "Type", &repo->config->datastore.type);
-	_get_json_string_value(data, tokens, num_tokens, curr_pos, "Path", &repo->config->datastore.path);
-	_get_json_string_value(data, tokens, num_tokens, curr_pos, "StorageMax", &repo->config->datastore.storage_max);
-	_get_json_int_value(data, tokens, num_tokens, curr_pos, "StorageGCWatermark", &repo->config->datastore.storage_gc_watermark);
-	_get_json_string_value(data, tokens, num_tokens, curr_pos, "GCPeriod", &repo->config->datastore.gc_period);
-	_get_json_string_value(data, tokens, num_tokens, curr_pos, "Params", &repo->config->datastore.params);
-	_get_json_int_value(data, tokens, num_tokens, curr_pos, "NoSync", &repo->config->datastore.no_sync);
-	_get_json_int_value(data, tokens, num_tokens, curr_pos, "HashOnRead", &repo->config->datastore.hash_on_read);
-	_get_json_int_value(data, tokens, num_tokens, curr_pos, "BloomFilterSize", &repo->config->datastore.bloom_filter_size);
+	_get_json_string_value(data, tokens, num_tokens, curr_pos, "Type", &repo->config->datastore->type);
+	_get_json_string_value(data, tokens, num_tokens, curr_pos, "Path", &repo->config->datastore->path);
+	_get_json_string_value(data, tokens, num_tokens, curr_pos, "StorageMax", &repo->config->datastore->storage_max);
+	_get_json_int_value(data, tokens, num_tokens, curr_pos, "StorageGCWatermark", &repo->config->datastore->storage_gc_watermark);
+	_get_json_string_value(data, tokens, num_tokens, curr_pos, "GCPeriod", &repo->config->datastore->gc_period);
+	_get_json_string_value(data, tokens, num_tokens, curr_pos, "Params", &repo->config->datastore->params);
+	_get_json_int_value(data, tokens, num_tokens, curr_pos, "NoSync", &repo->config->datastore->no_sync);
+	_get_json_int_value(data, tokens, num_tokens, curr_pos, "HashOnRead", &repo->config->datastore->hash_on_read);
+	_get_json_int_value(data, tokens, num_tokens, curr_pos, "BloomFilterSize", &repo->config->datastore->bloom_filter_size);

	// free the memory used reading the json file
	free(data);
@@ -396,7 +402,7 @@ int fs_repo_open_datastore(struct FSRepo* repo) {
	char** argv = NULL;

	// copy struct from config area to this area
-	repo->data_store = &repo->config->datastore;
+	repo->data_store = repo->config->datastore;

	if (strncmp(repo->data_store->type, "lmdb", 4) == 0) {
		// this is a LightningDB. Open it.
@@ -7,7 +7,8 @@ OBJS = testit.o ../cmd/ipfs/init.o ../commands/argument.o ../commands/command_op
	../repo/fsrepo/jsmn.o ../repo/fsrepo/lmdb_datastore.o ../repo/config/config.o ../os/utils.o ../repo/config/identity.o \
	../repo/config/bootstrap_peers.o ../repo/config/datastore.o ../repo/config/gateway.o \
	../repo/config/addresses.o ../repo/config/swarm.o ../repo/config/peer.o \
-	../thirdparty/ipfsaddr/ipfs_addr.o ../cid/cid.o ../multibase/multibase.o
+	../thirdparty/ipfsaddr/ipfs_addr.o ../cid/cid.o ../multibase/multibase.o \
+	../flatfs/flatfs.o

%.o: %.c $(DEPS)
	$(CC) -c -o $@ $< $(CFLAGS)
@@ -27,19 +27,29 @@ int test_init_new_installation() {
	// run the methods

	retVal = request.cmd.pre_run(&request);
-	if (retVal == 0)
+	if (retVal == 0) {
+		free(request.invoc_context);
+		ipfs_cmd_ipfs_init_command_free(&request.cmd);
		return 0;
+	}

	retVal = request.cmd.run(&request);
-	if (retVal == 0)
+	if (retVal == 0) {
+		free(request.invoc_context);
+		ipfs_cmd_ipfs_init_command_free(&request.cmd);
		return 0;
+	}

	retVal = request.cmd.post_run(&request);
-	if (retVal == 0)
+	if (retVal == 0) {
+		free(request.invoc_context);
+		ipfs_cmd_ipfs_init_command_free(&request.cmd);
		return 0;
+	}

	// clean up
	ipfs_cmd_ipfs_init_command_free( &request.cmd );
+	free(request.invoc_context);

	// make sure the repository exists
	retVal = os_utils_file_exists("/tmp/.ipfs/config");
test/flatfs/test_flatfs.h (new file, 80 lines)
@@ -0,0 +1,80 @@
#include "ipfs/flatfs/flatfs.h"

int test_flatfs_get_directory() {
	char* datastore_directory = "/tmp/";
	char* proposed_filename = "/ABC123XYZ";
	size_t results_len = 256;
	char results[results_len];

	// buffer too small
	int retVal = ipfs_flatfs_get_directory(datastore_directory, proposed_filename, results, 21);
	if (retVal != 0)
		return 0;

	// buffer just right
	retVal = ipfs_flatfs_get_directory(datastore_directory, proposed_filename, results, 22);
	if (retVal == 0)
		return 0;
	if (strcmp(results, "/tmp/ABC123XYZ_______") != 0)
		return 0;

	// name too long
	proposed_filename = "12345678901234567";
	retVal = ipfs_flatfs_get_directory(datastore_directory, proposed_filename, results, 22);
	if (retVal == 0)
		return 0;
	if (strcmp(results, "/tmp/1234567890123456") != 0)
		return 0;

	return 1;
}

int test_flatfs_get_filename() {
	char* proposed_filename = "/ABC123XYZ";
	size_t results_len = 256;
	char results[results_len];

	// buffer too small
	int retVal = ipfs_flatfs_get_filename(proposed_filename, results, 14);
	if (retVal != 0)
		return 0;

	// buffer just right
	retVal = ipfs_flatfs_get_filename(proposed_filename, results, 15);
	if (retVal == 0)
		return 0;
	if (strcmp(results, "ABC123XYZ.data") != 0)
		return 0;

	return 1;

}

int test_flatfs_get_full_filename() {
	char* datastore_directory = "/tmp/";
	char* proposed_filename = "/ABC123XYZ";
	size_t results_len = 256;
	char results[results_len];

	// buffer too small
	int retVal = ipfs_flatfs_get_full_filename(datastore_directory, proposed_filename, results, 21);
	if (retVal != 0)
		return 0;

	// buffer just right
	retVal = ipfs_flatfs_get_full_filename(datastore_directory, proposed_filename, results, 50);
	if (retVal == 0)
		return 0;
	if (strcmp(results, "/tmp/ABC123XYZ_______/ABC123XYZ.data") != 0)
		return 0;

	// name too long
	proposed_filename = "12345678901234567";
	retVal = ipfs_flatfs_get_full_filename(datastore_directory, proposed_filename, results, 50);
	if (retVal == 0)
		return 0;
	if (strcmp(results, "/tmp/1234567890123456/12345678901234567.data") != 0)
		return 0;

	return 1;
}
@@ -13,6 +13,19 @@
#include "ipfs/repo/fsrepo/fs_repo.h"
#include "ipfs/os/utils.h"

+int test_repo_config_new() {
+	struct RepoConfig* repoConfig;
+	int retVal = ipfs_repo_config_new(&repoConfig);
+	if (retVal == 0)
+		return 0;
+
+	retVal = ipfs_repo_config_free(repoConfig);
+	if (retVal == 0)
+		return 0;
+
+	return 1;
+}
+
int test_repo_config_init() {
	struct RepoConfig* repoConfig;
	int retVal = ipfs_repo_config_new(&repoConfig);
@@ -26,26 +39,26 @@ int test_repo_config_init() {
	// now tear it apart to check for anything broken

	// addresses
-	retVal = strncmp(repoConfig->addresses.api, "/ip4/127.0.0.1/tcp/5001", 23);
+	retVal = strncmp(repoConfig->addresses->api, "/ip4/127.0.0.1/tcp/5001", 23);
	if (retVal != 0)
		return 0;
-	retVal = strncmp(repoConfig->addresses.gateway, "/ip4/127.0.0.1/tcp/8080", 23);
+	retVal = strncmp(repoConfig->addresses->gateway, "/ip4/127.0.0.1/tcp/8080", 23);
	if (retVal != 0)
		return 0;

-	if (repoConfig->addresses.swarm.num_addresses != 2)
+	if (repoConfig->addresses->swarm->num_addresses != 2)
		return 0;

-	retVal = strncmp(repoConfig->addresses.swarm.addresses[0], "/ip4/0.0.0.0/tcp/4001", 21);
+	retVal = strncmp(repoConfig->addresses->swarm->addresses[0], "/ip4/0.0.0.0/tcp/4001", 21);
	if (retVal != 0)
		return 0;

-	retVal = strncmp(repoConfig->addresses.swarm.addresses[1], "/ip6/::/tcp/4001", 16);
+	retVal = strncmp(repoConfig->addresses->swarm->addresses[1], "/ip6/::/tcp/4001", 16);
	if (retVal != 0)
		return 0;

	// datastore
-	retVal = strncmp(repoConfig->datastore.path, "/Users/JohnJones/.ipfs/datastore", 32);
+	retVal = strncmp(repoConfig->datastore->path, "/Users/JohnJones/.ipfs/datastore", 32);
	if (retVal != 0)
		return 0;

@@ -1,8 +1,8 @@
#include "ipfs/repo/fsrepo/fs_repo.h"

int test_repo_fsrepo_open_config() {
-	struct FSRepo* fs_repo;
-	struct RepoConfig* repo_config;
+	struct FSRepo* fs_repo = NULL;
+	struct RepoConfig* repo_config = NULL;

	const char* path = "/tmp/.ipfs";

@@ -13,6 +13,12 @@ int test_repo_fsrepo_open_config() {

	// open the repository and read the file
	retVal = ipfs_repo_fsrepo_open(fs_repo);
+	if (retVal == 0) {
+		ipfs_repo_fsrepo_free(fs_repo);
+		return 0;
+	}
+
+	retVal = ipfs_repo_fsrepo_free(fs_repo);
	if (retVal == 0)
		return 0;

@@ -15,14 +15,24 @@
#include "libp2p/crypto/encoding/base64.h"

int test_repo_config_identity_new() {
-	struct Identity identity;
-	int retVal = repo_config_identity_init(&identity, 2046);
+	struct Identity* identity;
+	int retVal = repo_config_identity_new(&identity);
	if (retVal == 0)
		return 0;

+	retVal = repo_config_identity_init(identity, 2046);
+	if (retVal == 0)
+		return 0;
+
	// now examine it
-	int privateKeySize = sizeof(identity.private_key);
+	int privateKeySize = sizeof(identity->private_key);
	if (privateKeySize < 0) {
		printf("Private key structure size should be greater than 0\n");
		retVal = 0;
		return 0;
	}
+
+	retVal = repo_config_identity_free(identity);
+
	return retVal;
}
@@ -34,6 +44,7 @@ int test_repo_config_identity_private_key() {
	libp2p_crypto_encoding_base64_decode(priv_b64, strlen(priv_b64), out_buff, decoded_len, &decoded_len);
	char str[decoded_len];
	int j = 0;
+	free(out_buff);
	// now test
	return 1;

@@ -4,6 +4,7 @@
#include "repo/test_repo_fsrepo.h"
#include "cmd/ipfs/test_init.h"
#include "cid/test_cid.h"
+#include "flatfs/test_flatfs.h"

int testit(const char* name, int (*func)(void)) {
	printf("Testing %s...\n", name);
@@ -21,13 +22,17 @@ int main(int argc, char** argv) {
	counter += testit("test_cid_cast_multihash", test_cid_cast_multihash);
	counter += testit("test_cid_cast_non_multihash", test_cid_cast_non_multihash);
	counter += testit("test_init_new_installation", test_init_new_installation);
+	counter += testit("test_repo_config_new", test_repo_config_new);
	counter += testit("test_repo_config_init", test_repo_config_init);
	counter += testit("test_repo_config_write", test_repo_config_write);
	counter += testit("test_repo_config_identity_new", test_repo_config_identity_new);
	counter += testit("test_repo_config_identity_private_key", test_repo_config_identity_private_key);
-	counter += testit("test_reop_bootstrap_peers_init", test_repo_bootstrap_peers_init);
+	counter += testit("test_repo_bootstrap_peers_init", test_repo_bootstrap_peers_init);
	counter += testit("get_init_command", test_get_init_command);
	counter += testit("test_fs_repo_open", test_repo_fsrepo_open_config);
+	counter += testit("test_flatfs_get_directory", test_flatfs_get_directory);
+	counter += testit("test_flatfs_get_filename", test_flatfs_get_filename);
+	counter += testit("test_flatfs_get_full_filename", test_flatfs_get_full_filename);
	if (counter > 0) {
		printf("***** There were %d failed test(s) *****\n", counter);
	} else {