NC | Config Directory Restructure
Signed-off-by: Romy <[email protected]>
romayalon committed Aug 19, 2024
1 parent 46ccb82 commit e2285af
Showing 31 changed files with 1,537 additions and 921 deletions.
72 changes: 35 additions & 37 deletions src/cmd/manage_nsfs.js
@@ -8,7 +8,7 @@ const minimist = require('minimist');
const config = require('../../config');
const P = require('../util/promise');
const nb_native = require('../util/nb_native');
const { ConfigFS, JSON_SUFFIX } = require('../sdk/config_fs');
const { ConfigFS } = require('../sdk/config_fs');
const cloud_utils = require('../util/cloud_utils');
const native_fs_utils = require('../util/native_fs_utils');
const mongo_utils = require('../util/mongo_utils');
@@ -387,8 +387,8 @@ async function add_account(data) {
await manage_nsfs_validations.validate_account_args(config_fs, data, ACTIONS.ADD, undefined);

const access_key = has_access_keys(data.access_keys) ? data.access_keys[0].access_key : undefined;
const name_exists = await config_fs.is_account_exists({ name: data.name });
const access_key_exists = access_key && await config_fs.is_account_exists({ access_key });
const name_exists = await config_fs.is_account_exists_by_name(data.name);
const access_key_exists = access_key && await config_fs.is_account_exists_by_access_key(access_key);

const event_arg = data.name ? data.name : access_key;
if (name_exists || access_key_exists) {
@@ -405,7 +405,7 @@ async function add_account(data) {
// for validating against the schema we need an object, hence we parse it back to object
const account = encrypted_data ? JSON.parse(encrypted_data) : data;
nsfs_schema_utils.validate_account_schema(account);
await config_fs.create_account_config_file(data.name, account, true);
await config_fs.create_account_config_file(account);
write_stdout_response(ManageCLIResponse.AccountCreated, data, { account: event_arg });
}
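
A minimal sketch (not part of the diff) of how the renamed existence checks and the simplified create call fit together after this change; the config_fs instance and the account object shape are assumed, and only the three ConfigFS calls are taken from the hunks above.

// Sketch only. Assumes a ConfigFS instance exposing the methods shown in the diff above.
async function add_account_sketch(config_fs, account) {
    const access_key = account.access_keys && account.access_keys[0] && account.access_keys[0].access_key;
    const name_exists = await config_fs.is_account_exists_by_name(account.name);
    const access_key_exists = access_key && await config_fs.is_account_exists_by_access_key(access_key);
    if (name_exists || access_key_exists) throw new Error('account name or access key already exists');
    // the caller passes the whole account object; name and access-key indexing happens inside ConfigFS
    await config_fs.create_account_config_file(account);
}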

@@ -434,7 +434,7 @@ async function update_account(data, is_flag_iam_operate_on_root_account) {
// for validating against the schema we need an object, hence we parse it back to object
const account = encrypted_data ? JSON.parse(encrypted_data) : data;
nsfs_schema_utils.validate_account_schema(account);
await config_fs.update_account_config_file(data.name, account, undefined, undefined);
await config_fs.update_account_config_file(account);
write_stdout_response(ManageCLIResponse.AccountUpdated, data);
return;
}
@@ -446,9 +446,9 @@ async function update_account(data, is_flag_iam_operate_on_root_account) {
secret_key: data.access_keys[0].secret_key,
};

const name_exists = update_name && await config_fs.is_account_exists({ name: data.name });
const name_exists = update_name && await config_fs.is_account_exists_by_name(data.name, undefined);
const access_key_exists = update_access_key &&
await config_fs.is_account_exists({ access_key: data.access_keys[0].access_key.unwrap() });
await config_fs.is_account_exists_by_access_key(data.access_keys[0].access_key.unwrap());

if (name_exists || access_key_exists) {
const err_code = name_exists ? ManageCLIError.AccountNameAlreadyExists : ManageCLIError.AccountAccessKeyAlreadyExists;
@@ -465,18 +465,17 @@ async function update_account(data, is_flag_iam_operate_on_root_account) {
// for validating against the schema we need an object, hence we parse it back to object
const parsed_data = JSON.parse(encrypted_data);
nsfs_schema_utils.validate_account_schema(parsed_data);
if (update_name) {
await config_fs.create_account_config_file(new_name, parsed_data, true, [cur_access_key]);
await config_fs.delete_account_config_file(cur_name, data.access_keys);
} else if (update_access_key) {
await config_fs.update_account_config_file(cur_name, parsed_data, parsed_data.access_keys, [cur_access_key]);
}
await config_fs.update_account_config_file(parsed_data, {
old_name: update_name && cur_name,
new_access_keys_to_link: update_access_key && parsed_data.access_keys,
access_keys_to_delete: update_access_key && [{ access_key: cur_access_key }]
});
write_stdout_response(ManageCLIResponse.AccountUpdated, data);
}

async function delete_account(data) {
await manage_nsfs_validations.validate_account_args(config_fs, data, ACTIONS.DELETE, undefined);
await config_fs.delete_account_config_file(data.name, data.access_keys);
await config_fs.delete_account_config_file(data);
write_stdout_response(ManageCLIResponse.AccountDeleted, '', { account: data.name });
}
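
A minimal sketch (not part of the diff) of the new options-object form of update_account_config_file and the data-based delete; the option field names come from the hunk above, everything else here is assumed.

// Sketch only. The options fields mirror the hunk above; the surrounding data objects are assumptions.
async function rename_account_sketch(config_fs, account_data, old_name) {
    // rename: write the config under the new name and pass the old name so ConfigFS can clean it up
    await config_fs.update_account_config_file(account_data, { old_name });
}

async function rotate_access_key_sketch(config_fs, account_data, old_access_key) {
    // access-key rotation: link the new keys and hand the old one over for removal
    await config_fs.update_account_config_file(account_data, {
        new_access_keys_to_link: account_data.access_keys,
        access_keys_to_delete: [{ access_key: old_access_key }],
    });
}

async function delete_account_sketch(config_fs, account_data) {
    // delete now takes the whole account object rather than (name, access_keys)
    await config_fs.delete_account_config_file(account_data);
}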

@@ -578,19 +577,10 @@ function filter_bucket(bucket, filters) {
* @param {object} [filters]
*/
async function list_config_files(type, wide, show_secrets, filters = {}) {
let entries;
// in case we have a filter by name, we don't need to read all the entries and iterate them
// instead we "mock" the entries array to have one entry and it is the name by the filter (we add it for performance)
let entries = [];
const should_filter = Object.keys(filters).length > 0;
const is_filter_by_name = filters.name !== undefined;
if (is_filter_by_name) {
entries = [{'name': filters.name + JSON_SUFFIX}];
} else {
entries = type === TYPES.ACCOUNT ?
await config_fs.list_root_accounts() :
await config_fs.list_buckets();
}

const should_filter = Object.keys(filters).length > 0;
// decryption causing mkm initalization
// decrypt only if data has access_keys and show_secrets = true (no need to decrypt if show_secrets = false but should_filter = true)
const options = {
@@ -599,19 +589,27 @@
silent_if_missing: true
};

// in case we have a filter by name, we don't need to read all the entries and iterate them
// instead we "mock" the entries array to have one entry and it is the name by the filter (we add it for performance)
if (is_filter_by_name) {
entries = [filters.name];
} else if (type === TYPES.ACCOUNT) {
entries = await config_fs.list_accounts();
} else if (type === TYPES.BUCKET) {
entries = await config_fs.list_buckets();
}

let config_files_list = await P.map_with_concurrency(10, entries, async entry => {
if (entry.name.endsWith(JSON_SUFFIX)) {
if (wide || should_filter) {
const data = type === TYPES.ACCOUNT ?
await config_fs.get_account_by_name(entry.name, options) :
await config_fs.get_bucket_by_name(entry.name, options);
if (!data) return undefined;
if (should_filter && !filter_list_item(type, data, filters)) return undefined;
// remove secrets on !show_secrets && should filter
return wide ? _.omit(data, show_secrets ? [] : ['access_keys']) : { name: entry.name.slice(0, entry.name.indexOf(JSON_SUFFIX)) };
} else {
return { name: entry.name.slice(0, entry.name.indexOf(JSON_SUFFIX)) };
}
if (wide || should_filter) {
const data = type === TYPES.ACCOUNT ?
await config_fs.get_account_by_name(entry, options) :
await config_fs.get_bucket_by_name(entry, options);
if (!data) return undefined;
if (should_filter && !filter_list_item(type, data, filters)) return undefined;
// remove secrets on !show_secrets && should filter
return wide ? _.omit(data, show_secrets ? [] : ['access_keys']) : { name: entry };
} else {
return { name: entry };
}
});
// it inserts undefined for the entry '.noobaa-config-nsfs' and we wish to remove it
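
A minimal sketch (not part of the diff) of the simplified listing flow: list_accounts()/list_buckets() now return plain names with no JSON-suffix handling in the caller, so each entry can be passed straight to the by-name readers. The require path and the options object are assumed.

// Sketch only. P.map_with_concurrency and get_account_by_name are used as in the diff above.
const P = require('../util/promise');

async function read_all_accounts_sketch(config_fs, options) {
    const names = await config_fs.list_accounts();
    return P.map_with_concurrency(10, names, name => config_fs.get_account_by_name(name, options));
}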
5 changes: 0 additions & 5 deletions src/deploy/NVA_build/standalone_deploy_nsfs.sh
@@ -10,13 +10,8 @@ function execute() {

# Please note that the command we use here are without "sudo" because we are running from the container with Root permissions
function main() {
# Add accounts to run ceph tests
execute "node src/cmd/manage_nsfs account add --name cephalt --new_buckets_path ${FS_ROOT_1} --uid 1000 --gid 1000" nsfs_cephalt.log
execute "node src/cmd/manage_nsfs account add --name cephtenant --new_buckets_path ${FS_ROOT_2} --uid 2000 --gid 2000" nsfs_cephtenant.log

# Start noobaa service
execute "node src/cmd/nsfs" nsfs.log

# Wait for sometime to process to start
sleep 10
}
20 changes: 10 additions & 10 deletions src/manage_nsfs/health.js
@@ -10,7 +10,6 @@ const nb_native = require('../util/nb_native');
const native_fs_utils = require('../util/native_fs_utils');
const { read_stream_join } = require('../util/buffer_utils');
const { make_https_request } = require('../util/http_utils');
const { JSON_SUFFIX } = require('../sdk/config_fs');
const { TYPES } = require('./manage_nsfs_constants');
const { get_boolean_or_string_value, throw_cli_error, write_stdout_response } = require('./manage_nsfs_cli_utils');
const { ManageCLIResponse } = require('./manage_nsfs_cli_responses');
@@ -327,7 +326,7 @@ class NSFSHealth {
config_root_type_exists = await this.config_fs.validate_config_dir_exists(config_dir_path);
} else if (type === TYPES.ACCOUNT) {
// TODO - handle iam accounts when directory structure changes - read_account_by_id
config_dir_path = this.config_fs.accounts_dir_path;
config_dir_path = this.config_fs.accounts_by_name_dir_path;
config_root_type_exists = await this.config_fs.validate_config_dir_exists(config_dir_path);
}
// TODO - this is not a good handling for that - we need to take it to an upper level
@@ -339,15 +338,16 @@
};
}

const entries = type === TYPES.BUCKET ?
await this.config_fs.list_buckets() :
await this.config_fs.list_root_accounts();

const config_files = entries.filter(entree => !native_fs_utils.isDirectory(entree) && entree.name.endsWith(JSON_SUFFIX));
let config_files;
if (type === TYPES.BUCKET) {
config_files = await this.config_fs.list_buckets();
} else {
config_files = await this.config_fs.list_accounts();
}
for (const config_file of config_files) {
// config_file get data or push error
const { config_data = undefined, err_obj = undefined } =
await this.get_config_file_data_or_error_object(type, config_file.name);
await this.get_config_file_data_or_error_object(type, config_file);
if (!config_data && err_obj) {
invalid_storages.push(err_obj.invalid_storage);
continue;
@@ -395,9 +395,9 @@
} catch (err) {
let err_code;
const config_file_path = type === TYPES.BUCKET ?
await this.config_fs.get_bucket_path_by_name(config_file_name) :
this.config_fs.get_bucket_path_by_name(config_file_name) :
// TODO - should be changed to id when moving to new structure for supporting iam accounts
await this.config_fs.get_account_path_by_name(config_file_name);
this.config_fs.get_account_path_by_name(config_file_name);

if (err.code === 'ENOENT') {
dbg.log1(`Error: Config file path should be a valid path`, config_file_path, err);
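
A minimal sketch (not part of the diff) of the health-side effect of the restructure: config entries are listed as plain names and the path helpers are called synchronously; TYPES and the surrounding NSFSHealth wiring are assumed.

// Sketch only. Mirrors the calls shown in the health.js hunks above.
async function health_config_paths_sketch(config_fs, type, TYPES) {
    const names = type === TYPES.BUCKET ?
        await config_fs.list_buckets() :
        await config_fs.list_accounts();
    return names.map(name => type === TYPES.BUCKET ?
        config_fs.get_bucket_path_by_name(name) :
        config_fs.get_account_path_by_name(name));
}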
49 changes: 20 additions & 29 deletions src/manage_nsfs/manage_nsfs_validations.js
@@ -3,12 +3,9 @@

const config = require('../../config');
const dbg = require('../util/debug_module')(__filename);
const path = require('path');
const net = require('net');
const P = require('../util/promise');
const nb_native = require('../util/nb_native');
const string_utils = require('../util/string_utils');
const { JSON_SUFFIX } = require('../sdk/config_fs');
const native_fs_utils = require('../util/native_fs_utils');
const ManageCLIError = require('../manage_nsfs/manage_nsfs_cli_errors').ManageCLIError;
const bucket_policy_utils = require('../endpoint/s3/s3_bucket_policy_utils');
@@ -354,7 +351,7 @@ async function validate_bucket_args(config_fs, data, action) {
if (data.s3_policy) {
try {
await bucket_policy_utils.validate_s3_policy(data.s3_policy, data.name,
async principal => config_fs.is_account_exists({ name: principal })
async principal => config_fs.is_account_exists_by_name(principal, account.owner)
);
} catch (err) {
dbg.error('validate_bucket_args invalid bucket policy err:', err);
@@ -482,16 +479,14 @@ function _validate_access_keys(access_key, secret_key) {
* @param {string} account_name
*/
async function validate_account_not_owns_buckets(config_fs, account_name) {
const entries = await config_fs.list_buckets();
await P.map_with_concurrency(10, entries, async entry => {
if (entry.name.endsWith(JSON_SUFFIX)) {
const data = await config_fs.get_bucket_by_name(entry.name, { silent_if_missing: true });
if (data && data.bucket_owner === account_name) {
const detail_msg = `Account ${account_name} has bucket ${data.name}`;
throw_cli_error(ManageCLIError.AccountDeleteForbiddenHasBuckets, detail_msg);
}
return data;
const bucket_names = await config_fs.list_buckets();
await P.map_with_concurrency(10, bucket_names, async bucket_name => {
const data = await config_fs.get_bucket_by_name(bucket_name, { silent_if_missing: true });
if (data && data.bucket_owner === account_name) {
const detail_msg = `Account ${account_name} has bucket ${data.name}`;
throw_cli_error(ManageCLIError.AccountDeleteForbiddenHasBuckets, detail_msg);
}
return data;
});
}

@@ -503,24 +498,20 @@ async function validate_account_not_owns_buckets(config_fs, account_name) {
* @param {string} action
*/
async function check_if_root_account_does_not_have_IAM_users(config_fs, account_to_check, action) {
const fs_context = config_fs.fs_context;
const entries = await nb_native().fs.readdir(fs_context, config_fs.accounts_dir_path);
await P.map_with_concurrency(10, entries, async entry => {
if (entry.name.endsWith(JSON_SUFFIX)) {
const full_path = path.join(config_fs.accounts_dir_path, entry.name);
const account_data = await config_fs.get_config_data(full_path);
if (entry.name.includes(config.NSFS_TEMP_CONF_DIR_NAME)) return undefined;
const is_root_account_owns_user = check_root_account_owns_user(account_to_check, account_data);
if (is_root_account_owns_user) {
const detail_msg = `Account ${account_to_check.name} has IAM account ${account_data.name}`;
if (action === ACTIONS.DELETE) {
throw_cli_error(ManageCLIError.AccountDeleteForbiddenHasIAMAccounts, detail_msg);
}
// else it is called with action ACTIONS.UPDATE
throw_cli_error(ManageCLIError.AccountCannotBeRootAccountsManager, detail_msg);
// TODO - For supporting IAM, we need to check if {config_dir}/identities/{account_id}/users/ has anything inside
const account_names = await config_fs.list_accounts();
await P.map_with_concurrency(10, account_names, async account_name => {
const account_data = await config_fs.get_account_by_name(account_name);
const is_root_account_owns_user = check_root_account_owns_user(account_to_check, account_data);
if (is_root_account_owns_user) {
const detail_msg = `Account ${account_to_check.name} has IAM account ${account_data.name}`;
if (action === ACTIONS.DELETE) {
throw_cli_error(ManageCLIError.AccountDeleteForbiddenHasIAMAccounts, detail_msg);
}
return account_data;
// else it is called with action ACTIONS.UPDATE
throw_cli_error(ManageCLIError.AccountCannotBeRootAccountsManager, detail_msg);
}
return account_data;
});
}
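
A minimal sketch (not part of the diff) of the pattern both validation helpers now share: list account names, then read each one with get_account_by_name, instead of readdir plus JSON-suffix and temp-file filtering. The check callback is a hypothetical placeholder.

// Sketch only. Generalizes the list-then-read pattern from the two hunks above.
const P = require('../util/promise');

async function for_each_account_sketch(config_fs, check) {
    const names = await config_fs.list_accounts();
    await P.map_with_concurrency(10, names, async name => {
        const account_data = await config_fs.get_account_by_name(name);
        return check(account_data);
    });
}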
