Merge pull request #1676 from markwalet/improve-proxmox
Improve proxmox
commit 6e581913ea
2 changed files with 27 additions and 11 deletions
@@ -290,6 +290,7 @@ export function cleanServiceGroups(groups) {
         enableNowPlaying,
         volume, // diskstation widget,
         enableQueue, // sonarr/radarr
+        node, // Proxmox
       } = cleanedService.widget;

       let fieldsList = fields;
@@ -300,7 +301,7 @@ export function cleanServiceGroups(groups) {
           fieldsList = null;
         }
       }

       cleanedService.widget = {
         type,
         fields: fieldsList || null,
@@ -323,6 +324,9 @@ export function cleanServiceGroups(groups) {
       if (type === "unifi") {
         if (site) cleanedService.widget.site = site;
       }
+      if (type === "proxmox") {
+        if (node) cleanedService.widget.node = node;
+      }
       if (type === "kubernetes") {
         if (namespace) cleanedService.widget.namespace = namespace;
         if (app) cleanedService.widget.app = app;
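
The first file is Homepage's service-settings cleaner, which whitelists widget options per widget type before they reach the frontend; the new node option is only copied onto cleanedService.widget when the widget type is "proxmox". A minimal illustrative sketch of that behaviour (the cleanWidget helper below is made up for clarity, it is not the project's actual function):

// Illustrative sketch only: mimics the per-type whitelisting that
// cleanServiceGroups performs, where an option is silently dropped
// unless the widget type is known to use it.
function cleanWidget(widget) {
  const { type, node } = widget;
  const cleaned = { type };
  if (type === "proxmox" && node) cleaned.node = node;
  return cleaned;
}

console.log(cleanWidget({ type: "proxmox", node: "pve1", junk: true }));
// -> { type: "proxmox", node: "pve1" }
console.log(cleanWidget({ type: "unifi", node: "pve1" }));
// -> { type: "unifi" }  (node is ignored for other widget types)
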
@@ -4,6 +4,7 @@ import Container from "components/services/widget/container";
 import Block from "components/services/widget/block";
 import useWidgetAPI from "utils/proxy/use-widget-api";

 function calcRunning(total, current) {
   return current.status === "running" ? total + 1 : total;
 }

@@ -25,29 +26,40 @@ export default function Component({ service }) {
         <Block label="proxmox.vms" />
         <Block label="proxmox.lxc" />
         <Block label="resources.cpu" />
-        <Block label="resources.ram" />
+        <Block label="resources.mem" />
       </Container>
     );
   }

   const { data } = clusterData;
-  const vms = data.filter(item => item.type === "qemu" && item.template === 0) || [];
-  const lxc = data.filter(item => item.type === "lxc" && item.template === 0) || [];
-  const nodes = data.filter(item => item.type === "node") || [];
+  const vms = data.filter(item => item.type === "qemu" && item.template === 0 && (widget.node === undefined || widget.node === item.node)) || [];
+  const lxc = data.filter(item => item.type === "lxc" && item.template === 0 && (widget.node === undefined || widget.node === item.node)) || [];
+  const nodes = data.filter(item => item.type === "node" && (widget.node === undefined || widget.node === item.node)) || [];
   const runningVMs = vms.reduce(calcRunning, 0);
   const runningLXC = lxc.reduce(calcRunning, 0);

-  // TODO: support more than one node
-  // TODO: better handling of cluster with zero nodes
-  const node = nodes.length > 0 ? nodes[0] : { cpu: 0.0, mem: 0, maxmem: 0 };
+  if (nodes.length === 0) {
+    return (
+      <Container service={service}>
+        <Block label="proxmox.vms" value={`${runningVMs} / ${vms.length}`} />
+        <Block label="proxmox.lxc" value={`${runningLXC} / ${lxc.length}`} />
+        <Block label="resources.cpu" />
+        <Block label="resources.mem" />
+      </Container>
+    );
+  }
+
+  const maxMemory = nodes.reduce((sum, n) => n.maxmem + sum, 0);
+  const usedMemory = nodes.reduce((sum, n) => n.mem + sum, 0);
+  const maxCpu = nodes.reduce((sum, n) => n.maxcpu + sum, 0);
+  const usedCpu = nodes.reduce((sum, n) => (n.cpu * n.maxcpu) + sum, 0);

   return (
     <Container service={service}>
       <Block label="proxmox.vms" value={`${runningVMs} / ${vms.length}`} />
       <Block label="proxmox.lxc" value={`${runningLXC} / ${lxc.length}`} />
-      <Block label="resources.cpu" value={t("common.percent", { value: (node.cpu * 100) })} />
-      <Block label="resources.mem" value={t("common.percent", { value: ((node.mem / node.maxmem) * 100) })} />
+      <Block label="resources.cpu" value={t("common.percent", { value: ((usedCpu / maxCpu) * 100) })} />
+      <Block label="resources.mem" value={t("common.percent", { value: ((usedMemory / maxMemory) * 100) })} />
     </Container>
   );
 }
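
Net effect of the second file: when a node is configured, the widget narrows the Proxmox resource list to that node, and it now aggregates CPU and memory across all matching nodes instead of reading only nodes[0]. A rough, self-contained sketch of that aggregation with invented sample data (the real component gets this list through useWidgetAPI from the Proxmox cluster resources API); per-node cpu appears to be a 0-1 fraction of that node's cores, which is why the commit weights it by maxcpu before summing:

// Stand-alone sketch of the new aggregation; the sample data below is made up.
const data = [
  { type: "node", node: "pve1", cpu: 0.25, maxcpu: 8, mem: 8e9, maxmem: 16e9 },
  { type: "node", node: "pve2", cpu: 0.5, maxcpu: 4, mem: 2e9, maxmem: 8e9 },
];
const widget = {}; // set { node: "pve1" } to scope the widget to a single node

const nodes = data.filter((item) => item.type === "node" && (widget.node === undefined || widget.node === item.node));

// Weight each node's utilization fraction by its core count, then divide by
// the total core count; memory is summed directly in bytes.
const maxCpu = nodes.reduce((sum, n) => n.maxcpu + sum, 0);
const usedCpu = nodes.reduce((sum, n) => n.cpu * n.maxcpu + sum, 0);
const maxMemory = nodes.reduce((sum, n) => n.maxmem + sum, 0);
const usedMemory = nodes.reduce((sum, n) => n.mem + sum, 0);

console.log(`cpu ${((usedCpu / maxCpu) * 100).toFixed(1)}%`); // (2 + 2) used of 12 cores -> 33.3%
console.log(`mem ${((usedMemory / maxMemory) * 100).toFixed(1)}%`); // 10 GB of 24 GB -> 41.7%
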