2017-10-04 14:36:38 +00:00
|
|
|
// these two for strdup
|
|
|
|
#define _GNU_SOURCE
|
|
|
|
#define __USE_GNU
|
2016-12-12 11:27:06 +00:00
|
|
|
#include <stdio.h>
|
2016-12-22 15:21:18 +00:00
|
|
|
#include <stdlib.h>
|
|
|
|
#include <string.h>
|
2017-09-20 17:39:26 +00:00
|
|
|
#include <pthread.h>
|
2016-12-12 11:27:06 +00:00
|
|
|
|
|
|
|
#include "ipfs/importer/importer.h"
|
2016-12-14 17:07:43 +00:00
|
|
|
#include "ipfs/merkledag/merkledag.h"
|
2017-04-06 14:33:28 +00:00
|
|
|
#include "libp2p/os/utils.h"
|
2017-09-26 14:43:10 +00:00
|
|
|
#include "ipfs/cmd/cli.h"
|
2017-04-20 22:56:03 +00:00
|
|
|
#include "ipfs/core/ipfs_node.h"
|
2017-10-04 14:36:38 +00:00
|
|
|
#include "ipfs/core/http_request.h"
|
2016-12-21 13:08:44 +00:00
|
|
|
#include "ipfs/repo/fsrepo/fs_repo.h"
|
2017-04-06 22:46:40 +00:00
|
|
|
#include "ipfs/repo/init.h"
|
2016-12-22 15:21:18 +00:00
|
|
|
#include "ipfs/unixfs/unixfs.h"
|
2016-12-12 11:27:06 +00:00
|
|
|
|
|
|
|
#define MAX_DATA_SIZE 262144 // 1024 * 256;
|
|
|
|
|
2016-12-14 11:25:09 +00:00
|
|
|
/***
|
|
|
|
* Imports OS files into the datastore
|
|
|
|
*/
|
|
|
|
|
2016-12-23 22:21:04 +00:00
|
|
|
/***
|
|
|
|
* adds a blocksize to the UnixFS structure stored in the data
|
|
|
|
* element of a Node
|
|
|
|
* @param node the node to work with
|
|
|
|
* @param blocksize the blocksize to add
|
|
|
|
* @returns true(1) on success
|
|
|
|
*/
|
2017-04-20 22:56:03 +00:00
|
|
|
int ipfs_importer_add_filesize_to_data_section(struct HashtableNode* node, size_t bytes_read) {
|
2016-12-23 22:21:04 +00:00
|
|
|
// now add to the data section
|
|
|
|
struct UnixFS* data_section = NULL;
|
|
|
|
if (node->data == NULL) {
|
|
|
|
// nothing in data section yet, create new UnixFS
|
|
|
|
ipfs_unixfs_new(&data_section);
|
|
|
|
data_section->data_type = UNIXFS_FILE;
|
|
|
|
} else {
|
|
|
|
ipfs_unixfs_protobuf_decode(node->data, node->data_size, &data_section);
|
|
|
|
}
|
|
|
|
struct UnixFSBlockSizeNode bs;
|
|
|
|
bs.block_size = bytes_read;
|
|
|
|
ipfs_unixfs_add_blocksize(&bs, data_section);
|
|
|
|
data_section->file_size += bytes_read;
|
|
|
|
// put the new data back in the data section
|
|
|
|
size_t protobuf_size = ipfs_unixfs_protobuf_encode_size(data_section); //delay bytes_size entry
|
|
|
|
unsigned char protobuf[protobuf_size];
|
|
|
|
ipfs_unixfs_protobuf_encode(data_section, protobuf, protobuf_size, &protobuf_size);
|
|
|
|
ipfs_unixfs_free(data_section);
|
2017-04-20 22:56:03 +00:00
|
|
|
ipfs_hashtable_node_set_data(node, protobuf, protobuf_size);
|
2016-12-23 22:21:04 +00:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2016-12-12 11:27:06 +00:00
|
|
|
/**
|
|
|
|
* read the next chunk of bytes, create a node, and add a link to the node in the passed-in node
|
|
|
|
* @param file the file handle
|
|
|
|
* @param node the node to add to
|
|
|
|
* @returns number of bytes read
|
|
|
|
*/
|
2017-04-20 22:56:03 +00:00
|
|
|
/**
 * Read the next chunk of bytes from the file, create a node, and add a link
 * to the new node in the passed-in parent node. For files larger than one
 * chunk the parent accumulates links; a small file's bytes go directly into
 * the parent's data section.
 * @param file the file handle, positioned at the next chunk to read
 * @param parent_node the node to add to
 * @param fs_repo the repository new nodes are persisted into
 * @param total_size accumulates the t_size of every link added to parent_node
 * @param bytes_written receives the number of bytes persisted for this chunk
 * @returns number of bytes read (0 on failure or EOF)
 */
size_t ipfs_import_chunk(FILE* file, struct HashtableNode* parent_node, struct FSRepo* fs_repo, size_t* total_size, size_t* bytes_written) {
	unsigned char buffer[MAX_DATA_SIZE];
	size_t bytes_read = fread(buffer, 1, MAX_DATA_SIZE, file);

	// structs used by this method
	struct UnixFS* new_unixfs = NULL;
	struct HashtableNode* new_node = NULL;
	struct NodeLink* new_link = NULL;

	// put the file bits into a new UnixFS file
	if (ipfs_unixfs_new(&new_unixfs) == 0)
		return 0;
	new_unixfs->data_type = UNIXFS_FILE;
	new_unixfs->file_size = bytes_read;
	if (ipfs_unixfs_add_data(&buffer[0], bytes_read, new_unixfs) == 0) {
		ipfs_unixfs_free(new_unixfs);
		return 0;
	}
	// protobuf the UnixFS
	size_t protobuf_size = ipfs_unixfs_protobuf_encode_size(new_unixfs);
	if (protobuf_size == 0) {
		ipfs_unixfs_free(new_unixfs);
		return 0;
	}
	unsigned char protobuf[protobuf_size];
	*bytes_written = 0;
	if (ipfs_unixfs_protobuf_encode(new_unixfs, protobuf, protobuf_size, bytes_written) == 0) {
		ipfs_unixfs_free(new_unixfs);
		return 0;
	}

	// we're done with the UnixFS object
	ipfs_unixfs_free(new_unixfs);

	size_t size_of_node = 0;

	// if there is more to read, create a new node.
	if (bytes_read == MAX_DATA_SIZE) {
		// create a new node
		if (ipfs_hashtable_node_new_from_data(protobuf, *bytes_written, &new_node) == 0) {
			return 0;
		}
		// persist
		// NOTE(review): this declaration shadows the outer size_of_node, so the
		// outer variable stays 0 throughout this branch — confirm intentional.
		size_t size_of_node = 0;
		if (ipfs_merkledag_add(new_node, fs_repo, &size_of_node) == 0) {
			ipfs_hashtable_node_free(new_node);
			return 0;
		}

		// put link in parent node
		if (ipfs_node_link_create(NULL, new_node->hash, new_node->hash_size, &new_link) == 0) {
			ipfs_hashtable_node_free(new_node);
			return 0;
		}
		new_link->t_size = size_of_node;
		*total_size += new_link->t_size;
		// NOTE: disposal of this link object happens when the parent is disposed
		if (ipfs_hashtable_node_add_link(parent_node, new_link) == 0) {
			ipfs_hashtable_node_free(new_node);
			return 0;
		}
		// record this chunk's size in the parent's UnixFS metadata
		ipfs_importer_add_filesize_to_data_section(parent_node, bytes_read);
		ipfs_hashtable_node_free(new_node);
		*bytes_written = size_of_node;
		size_of_node = 0;
	} else {
		// if there are no existing links, put what we pulled from the file into parent_node
		// otherwise, add it as a link
		if (parent_node->head_link == NULL) {
			// small file: the whole payload fits in the parent's data section
			ipfs_hashtable_node_set_data(parent_node, protobuf, *bytes_written);
		} else {
			// there are existing links. put the data in a new node, save it, then put the link in parent_node
			// create a new node
			if (ipfs_hashtable_node_new_from_data(protobuf, *bytes_written, &new_node) == 0) {
				return 0;
			}
			// persist
			if (ipfs_merkledag_add(new_node, fs_repo, &size_of_node) == 0) {
				ipfs_hashtable_node_free(new_node);
				return 0;
			}

			// put link in parent node
			if (ipfs_node_link_create(NULL, new_node->hash, new_node->hash_size, &new_link) == 0) {
				ipfs_hashtable_node_free(new_node);
				return 0;
			}
			new_link->t_size = size_of_node;
			*total_size += new_link->t_size;
			// NOTE: disposal of this link object happens when the parent is disposed
			if (ipfs_hashtable_node_add_link(parent_node, new_link) == 0) {
				ipfs_hashtable_node_free(new_node);
				return 0;
			}
			ipfs_importer_add_filesize_to_data_section(parent_node, bytes_read);
			ipfs_hashtable_node_free(new_node);
		}
		// persist the main node
		// (last chunk of the file: parent is now complete, so write it out)
		ipfs_merkledag_add(parent_node, fs_repo, bytes_written);
		*bytes_written += size_of_node;
	} // add to parent vs add as link

	return bytes_read;
}
|
|
|
|
|
2016-12-29 03:45:35 +00:00
|
|
|
/**
|
|
|
|
* Prints to the console the results of a node import
|
|
|
|
* @param node the node imported
|
|
|
|
* @param file_name the name of the file
|
|
|
|
* @returns true(1) if successful, false(0) if couldn't generate the MultiHash to be displayed
|
|
|
|
*/
|
2017-04-20 22:56:03 +00:00
|
|
|
int ipfs_import_print_node_results(const struct HashtableNode* node, const char* file_name) {
|
2016-12-29 03:45:35 +00:00
|
|
|
// give some results to the user
|
|
|
|
//TODO: if directory_entry is itself a directory, traverse and report files
|
|
|
|
int buffer_len = 100;
|
|
|
|
unsigned char buffer[buffer_len];
|
|
|
|
if (ipfs_cid_hash_to_base58(node->hash, node->hash_size, buffer, buffer_len) == 0) {
|
|
|
|
printf("Unable to generate hash for file %s.\n", file_name);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
printf("added %s %s\n", buffer, file_name);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-12-12 11:27:06 +00:00
|
|
|
/**
|
2016-12-28 02:39:58 +00:00
|
|
|
* Creates a node based on an incoming file or directory
|
|
|
|
* NOTE: this can be called recursively for directories
|
2017-03-19 12:47:19 +00:00
|
|
|
* NOTE: When this function completes, parent_node will be either:
|
|
|
|
* 1) the complete file, in the case of a small file (<256k-ish)
|
|
|
|
* 2) a node with links to the various pieces of a large file
|
|
|
|
* 3) a node with links to files and directories if 'fileName' is a directory
|
2017-01-02 04:48:09 +00:00
|
|
|
* @param root_dir the directory for where to look for the file
|
2016-12-28 02:39:58 +00:00
|
|
|
* @param file_name the file (or directory) to import
|
|
|
|
* @param parent_node the root node (has links to others in case this is a large file and is split)
|
2017-01-02 04:48:09 +00:00
|
|
|
* @param fs_repo the ipfs repository
|
|
|
|
* @param bytes_written number of bytes written to disk
|
|
|
|
* @param recursive true if we should navigate directories
|
2016-12-12 11:27:06 +00:00
|
|
|
* @returns true(1) on success
|
|
|
|
*/
|
2017-04-20 22:56:03 +00:00
|
|
|
/**
 * Creates a node based on an incoming file or directory.
 * NOTE: this can be called recursively for directories.
 * @param root_dir the directory for where to look for the file (NULL at top level)
 * @param fileName the file (or directory) to import
 * @param parent_node receives the root node (has links to others for large files/directories)
 * @param local_node the local ipfs node (repo + routing)
 * @param bytes_written number of bytes written to disk
 * @param recursive true if we should navigate directories
 * @returns true(1) on success
 */
int ipfs_import_file(const char* root_dir, const char* fileName, struct HashtableNode** parent_node, struct IpfsNode* local_node, size_t* bytes_written, int recursive) {
	/**
	 * NOTE: When this function completes, parent_node will be either:
	 * 1) the complete file, in the case of a small file (<256k-ish)
	 * 2) a node with links to the various pieces of a large file
	 * 3) a node with links to files and directories if 'fileName' is a directory
	 */
	int retVal = 1;
	// primed so the chunk loop below runs at least once
	int bytes_read = MAX_DATA_SIZE;
	size_t total_size = 0;

	if (os_utils_is_directory(fileName)) {
		// calculate the new root_dir
		char* new_root_dir = (char*)root_dir;
		char* path = NULL;
		char* file = NULL;
		os_utils_split_filename(fileName, &path, &file);
		if (root_dir == NULL) {
			new_root_dir = file;
		} else {
			// reuse 'path' to hold root_dir joined with the directory name
			free(path);
			path = malloc(strlen(root_dir) + strlen(file) + 2);
			if (path == NULL) {
				// memory issue
				if (file != NULL)
					free(file);
				return 0;
			}
			os_utils_filepath_join(root_dir, file, path, strlen(root_dir) + strlen(file) + 2);
			new_root_dir = path;
		}
		// initialize parent_node as a directory
		if (ipfs_hashtable_node_create_directory(parent_node) == 0) {
			if (path != NULL)
				free(path);
			if (file != NULL)
				free(file);
			return 0;
		}
		// get list of files
		struct FileList* first = os_utils_list_directory(fileName);
		struct FileList* next = first;
		if (recursive) {
			while (next != NULL) {
				// process each file. NOTE: could be an embedded directory
				*bytes_written = 0;
				struct HashtableNode* file_node;
				// put the filename together from fileName, which is the directory, and next->file_name
				// which is a file (or a directory) within the directory we just found.
				size_t filename_len = strlen(fileName) + strlen(next->file_name) + 2;
				char full_file_name[filename_len];
				os_utils_filepath_join(fileName, next->file_name, full_file_name, filename_len);
				// adjust root directory

				// recurse into the entry (file or sub-directory)
				if (ipfs_import_file(new_root_dir, full_file_name, &file_node, local_node, bytes_written, recursive) == 0) {
					ipfs_hashtable_node_free(*parent_node);
					os_utils_free_file_list(first);
					if (file != NULL)
						free(file);
					if (path != NULL)
						free (path);
					return 0;
				}
				// TODO: probably need to display what was imported
				int len = strlen(next->file_name) + strlen(new_root_dir) + 2;
				char full_path[len];
				os_utils_filepath_join(new_root_dir, next->file_name, full_path, len);
				ipfs_import_print_node_results(file_node, full_path);
				// TODO: Determine what needs to be done if this file_node is a file, a split file, or a directory
				// Create link from file_node
				struct NodeLink* file_node_link;
				ipfs_node_link_create(next->file_name, file_node->hash, file_node->hash_size, &file_node_link);
				file_node_link->t_size = *bytes_written;
				// add file_node as link to parent_node
				ipfs_hashtable_node_add_link(*parent_node, file_node_link);
				// clean up file_node
				ipfs_hashtable_node_free(file_node);
				// move to next file in list
				next = next->next;
			} // while going through files
		}
		// save the parent_node (the directory)
		// NOTE(review): this local shadows the bytes_written out-parameter, so
		// the directory node's size is never reported to the caller — confirm intentional.
		size_t bytes_written;
		ipfs_merkledag_add(*parent_node, local_node->repo, &bytes_written);
		if (file != NULL)
			free(file);
		if (path != NULL)
			free (path);
		os_utils_free_file_list(first);
	} else {
		// process this file
		FILE* file = fopen(fileName, "rb");
		if (file == 0)
			return 0;
		retVal = ipfs_hashtable_node_new(parent_node);
		if (retVal == 0) {
			return 0;
		}

		// add all nodes (will be called multiple times for large files)
		while ( bytes_read == MAX_DATA_SIZE) {
			size_t written = 0;
			bytes_read = ipfs_import_chunk(file, *parent_node, local_node->repo, &total_size, &written);
			*bytes_written += written;
		}
		fclose(file);
	}

	// notify the network
	struct HashtableNode *htn = *parent_node;
	local_node->routing->Provide(local_node->routing, htn->hash, htn->hash_size);
	// notify the network of the subnodes too
	struct NodeLink *nl = htn->head_link;
	while (nl != NULL) {
		local_node->routing->Provide(local_node->routing, nl->hash, nl->hash_size);
		nl = nl->next;
	}

	return 1;
}
|
2016-12-21 13:08:44 +00:00
|
|
|
|
2017-04-07 00:05:30 +00:00
|
|
|
/**
|
|
|
|
* Pulls list of files from command line parameters
|
|
|
|
* @param argc number of command line parameters
|
|
|
|
* @param argv command line parameters
|
|
|
|
* @returns a FileList linked list of filenames
|
|
|
|
*/
|
2017-09-26 14:43:10 +00:00
|
|
|
struct FileList* ipfs_import_get_filelist(struct CliArguments* args) {
|
2017-04-07 00:05:30 +00:00
|
|
|
struct FileList* first = NULL;
|
|
|
|
struct FileList* last = NULL;
|
|
|
|
|
2017-09-26 14:43:10 +00:00
|
|
|
for (int i = args->verb_index + 1; i < args->argc; i++) {
|
|
|
|
if (strcmp(args->argv[i], "add") == 0) {
|
2017-04-07 00:05:30 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
struct FileList* current = (struct FileList*)malloc(sizeof(struct FileList));
|
2017-10-05 20:14:47 +00:00
|
|
|
if (current == NULL) {
|
|
|
|
return NULL;
|
|
|
|
}
|
2017-04-07 00:05:30 +00:00
|
|
|
current->next = NULL;
|
2017-09-26 14:43:10 +00:00
|
|
|
current->file_name = args->argv[i];
|
2017-04-07 00:05:30 +00:00
|
|
|
// now wire it in
|
|
|
|
if (first == NULL) {
|
|
|
|
first = current;
|
|
|
|
}
|
|
|
|
if (last != NULL) {
|
|
|
|
last->next = current;
|
|
|
|
}
|
|
|
|
// now set last to current
|
|
|
|
last = current;
|
|
|
|
}
|
|
|
|
return first;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* See if the recursive flag was passed on the command line
|
|
|
|
* @param argc number of command line parameters
|
|
|
|
* @param argv command line parameters
|
|
|
|
* @returns true(1) if -r was passed, false(0) otherwise
|
|
|
|
*/
|
|
|
|
/**
 * See if the recursive flag was passed on the command line
 * @param argc number of command line parameters
 * @param argv command line parameters
 * @returns true(1) if -r was passed, false(0) otherwise
 */
int ipfs_import_is_recursive(int argc, char** argv) {
	int idx = 0;
	while (idx < argc) {
		// stop at the first occurrence of the flag
		if (!strcmp(argv[idx], "-r"))
			return 1;
		idx++;
	}
	return 0;
}
|
2016-12-28 02:39:58 +00:00
|
|
|
|
2016-12-21 13:08:44 +00:00
|
|
|
/**
|
2016-12-28 02:39:58 +00:00
|
|
|
* called from the command line to import multiple files or directories
|
2016-12-21 13:08:44 +00:00
|
|
|
* @param argc the number of arguments
|
|
|
|
* @param argv the arguments
|
|
|
|
*/
|
2017-09-26 14:43:10 +00:00
|
|
|
/**
 * Called from the command line to import multiple files or directories.
 * Builds a file list from the arguments, opens the repo offline, imports
 * each non-switch argument, and prints the resulting hashes.
 * @param args the parsed command line arguments
 * @returns true(1) on success, false(0) on failure
 */
int ipfs_import_files(struct CliArguments* args) {
	/*
	 * Param 0: ipfs
	 * param 1: add
	 * param 2: -r (optional)
	 * param 3: directoryname
	 */
	struct IpfsNode* local_node = NULL;
	char* repo_path = NULL;
	int retVal = 0;
	struct FileList* first = NULL;
	struct FileList* current = NULL;
	char* path = NULL;
	char* filename = NULL;
	struct HashtableNode* directory_entry = NULL;

	int recursive = ipfs_import_is_recursive(args->argc, args->argv);

	// parse the command line
	first = ipfs_import_get_filelist(args);

	// open the repo
	if (!ipfs_repo_get_directory(args->argc, args->argv, &repo_path)) {
		fprintf(stderr, "Repo does not exist: %s\n", repo_path);
		goto exit;
	}
	ipfs_node_offline_new(repo_path, &local_node);

	/** disabling for the time being
	if (local_node->mode == MODE_API_AVAILABLE) {
		// do this through the API
		struct HttpRequest* request = ipfs_core_http_request_new();
		request->command = "add";
		struct HttpParam* recursive_param = ipfs_core_http_param_new();
		recursive_param->name = strdup("recursive");
		recursive_param->value = strdup((recursive ? "true" : "false"));
		libp2p_utils_vector_add(request->params, recursive_param);
		current = first;
		while (current != NULL) {
			libp2p_utils_vector_add(request->arguments, current->file_name);
			current = current->next;
		}
		uint8_t* result = NULL;
		size_t result_size = 0;
		if (!ipfs_core_http_request_post(local_node, request, &result, &result_size, data, data_size)) {

		}

	} else {
	*/
	// No daemon is running. Do this without using the API
	// import the file(s)
	current = first;
	while (current != NULL) {
		if (current->file_name[0] != '-') { // not a switch
			// split so we can print just the base filename afterwards
			os_utils_split_filename(current->file_name, &path, &filename);
			size_t bytes_written = 0;
			if (!ipfs_import_file(NULL, current->file_name, &directory_entry, local_node, &bytes_written, recursive))
				goto exit;
			ipfs_import_print_node_results(directory_entry, filename);
			// cleanup (per-entry; the exit block handles the final iteration's leftovers)
			if (path != NULL) {
				free(path);
				path = NULL;
			}
			if (filename != NULL) {
				free(filename);
				filename = NULL;
			}
			if (directory_entry != NULL) {
				ipfs_hashtable_node_free(directory_entry);
				directory_entry = NULL;
			}
		}
		current = current->next;
	}
	// } uncomment this line when the api is up and running with file transfer

	retVal = 1;
	exit:
	if (local_node != NULL)
		ipfs_node_free(local_node);
	// free file list
	// (only the nodes: file_name pointers belong to argv)
	current = first;
	while (current != NULL) {
		first = current->next;
		free(current);
		current = first;
	}
	if (path != NULL)
		free(path);
	if (filename != NULL)
		free(filename);
	if (directory_entry != NULL)
		ipfs_hashtable_node_free(directory_entry);
	// NOTE(review): repo_path is deliberately left unfreed (see disabled code
	// below) — presumably owned by local_node or the repo layer; verify.
	//if (repo_path != NULL)
	//	free(repo_path);
	return retVal;
}
|
2016-12-29 03:45:35 +00:00
|
|
|
|