WIP: single node Ceph on bm-5 #157

Draft
raito wants to merge 2 commits from single-node-ceph into main
2 changed files with 132 additions and 0 deletions
Showing only changes of commit 36290942ea

73
services/ceph/default.nix Normal file

@@ -0,0 +1,73 @@
{ config, lib, ... }:
let
inherit (lib) mkOption types;
cfg = config.bagel.ceph;
in
{
options.bagel.ceph = {
clusterName = mkOption {
type = types.str;
description = "Cluster name";
};
fsid = mkOption {
type = types.str;
description = "Filesystem ID for the whole cluster";
};
region = mkOption {
type = types.enum [ "wob01" ];
description = "Ceph cluster region identifier";
};
pools = mkOption {
type = types.attrsOf (types.submodule ({ ... }: {
options = {
pg_num = mkOption {
type = types.int;
};
pgp_num = mkOption {
type = types.int;
};
};
}));
description = "Attribute set of Ceph pools whose options this module will try to maintain";
};
enableSingleNode = mkOption {
type = types.bool;
default = false;
description = ''
By default, Ceph does not allow single node deployments.
Enabling this will modify some options of the cluster to permit this configuration.
'';
};
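# (Not wired up in this commit yet; presumably this amounts to relaxing
# "osd pool default size", "osd pool default min size" and
# "osd crush chooseleaf type" so a lone host can reach HEALTH_OK.)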
# In the future, we may put monitors on other hosts, etc.
# For now, we only cater to the hyperconverged use case: everything on the same machine.
# Note that our deployment only caters to RGW for now.
# RBD may be interesting if we end up doing virtualization on top of it for VM HA.
};
imports = [
./hyperconverged.nix
];
config = {
bagel.ceph.pools = {
".rgw.root" = {};
# RGW pools specific to this region, inside this cluster.
"${cfg.region}.rgw.control" = {};
"${cfg.region}.rgw.meta" = {};
"${cfg.region}.rgw.index" = {};
"${cfg.region}.rgw.log" = {};
"${cfg.region}.rgw.buckets.index" = {
};
"${cfg.region}.rgw.buckets.log" = {};
};
};
}
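
For reference, a host would consume this module roughly as follows (a sketch; the fsid, pool sizing and OSD names below are illustrative placeholders, not values from this PR):

{
  bagel.ceph = {
    clusterName = "bagel";
    fsid = "00000000-0000-0000-0000-000000000000";
    region = "wob01";
    enableSingleNode = true;
    # pg_num/pgp_num have no defaults, so hosts must size the pools they use.
    pools.".rgw.root" = { pg_num = 32; pgp_num = 32; };
    hyperconverged = {
      enable = true;
      osds."osd-0".crushStorageClass = "nvme";
    };
  };
}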

59
services/ceph/hyperconverged.nix Normal file

@@ -0,0 +1,59 @@
{ config, lib, ... }:
let
inherit (lib) mkIf mkEnableOption types mkOption;
globalCfg = config.bagel.ceph;
cfg = config.bagel.ceph.hyperconverged;
# Partition things by fsid across all nodes.
in
{
options.bagel.ceph.hyperconverged = {
enable = mkEnableOption "a hyperconverged Ceph node on this system";
osds = mkOption {
type = types.attrsOf (types.submodule ({ ... }: {
options = {
crushStorageClass = mkOption {
type = types.enum [ "hdd" "ssd" "nvme" ];
};
};
}));
};
};
config = mkIf cfg.enable {
services.ceph = {
enable = true;
global = {
inherit (globalCfg) fsid;
};
mon = {
enable = true;
daemons = [];
};
mgr = {
enable = true;
daemons = [];
};
osd = {
enable = true;
daemons = [];
};
rgw = {
enable = true;
daemons = [];
};
};
# TODO: convergence logic to build up.
# Automatic provisioning of all MONs.
# Automatic provisioning of all OSDs.
# Automatic provisioning of all RGWs.
# Ensure that pools exist with the right crush rules.
# Activation unit:
# Automatic activation of all valid OSDs.
};
}
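
One possible shape for the pool-convergence step in the TODO above, reusing this file's globalCfg binding (a sketch under assumptions: pkgs is added to the module arguments, the stock ceph.target is available, and ceph osd pool create exits 0 when the pool already exists, as it does on current releases):

systemd.services.ceph-pools-converge = {
  description = "Converge Ceph pools towards the configured state";
  wantedBy = [ "multi-user.target" ];
  after = [ "ceph.target" ];
  path = [ pkgs.ceph ];
  serviceConfig.Type = "oneshot";
  # Re-apply the desired pg/pgp counts on every activation; creation is
  # idempotent and `ceph osd pool set` converges existing pools.
  script = lib.concatStringsSep "\n" (lib.mapAttrsToList (name: pool: ''
    ceph osd pool create ${lib.escapeShellArg name} ${toString pool.pg_num} ${toString pool.pgp_num}
    ceph osd pool set ${lib.escapeShellArg name} pg_num ${toString pool.pg_num}
    ceph osd pool set ${lib.escapeShellArg name} pgp_num ${toString pool.pgp_num}
  '') globalCfg.pools);
};

CRUSH rules per storage class are deliberately left out here; they belong with the OSD provisioning step.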