
Commit 74ee71a

fix scheduler logics
1 parent 00196a7 commit 74ee71a

File tree

13 files changed (+308 -89 lines)


README.md

Lines changed: 2 additions & 0 deletions
@@ -63,6 +63,8 @@ kubectl delete cluster cappx-test

- Supports custom cloud-config (user data). CAPPX uses a VNC websocket for bootstrapping nodes, so it can apply custom cloud-config that cannot be achieved through the Proxmox API alone.

- Flexible vmid/node assignment. You can control which vmid is assigned to each QEMU and how QEMUs are scheduled onto Proxmox nodes. For more details, see [qemu-scheduler](./cloud/scheduler/).

### Node Images

CAPPX is compatible with the `iso`, `qcow2`, `qed`, `raw`, `vdi`, `vpc`, and `vmdk` image formats. You can build your own node image and use it for `ProxmoxMachine`.

cloud/scheduler/README.md

Lines changed: 23 additions & 3 deletions
@@ -2,18 +2,38 @@

Scheduling refers to making sure that VMs (QEMUs) are matched to Proxmox nodes.

## How qemu-scheduler selects a Proxmox node to run a QEMU

The basic flow of node selection is `filter => score => select the node with the highest score`.
### Filter Plugins

Filter plugins filter out nodes based on node name, overcommit ratio, and so on.

#### Regex Plugin

The regex plugin is one of qemu-scheduler's default filter plugins. It lets you restrict scheduling to nodes whose names match a regular expression.
```sh
key: node.qemu-scheduler/regex
value (example): node[0-9]+
```

### Score Plugins

Score plugins score the filtered nodes based on resource usage and so on.
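To make the `filter => score => select` flow described above concrete, here is a minimal, self-contained sketch of the selection loop. It uses simplified local types and hypothetical plugin functions instead of qemu-scheduler's real `framework` interfaces, so treat it only as an illustration of the control flow.

```go
package main

import "fmt"

// node is a simplified stand-in for the scheduler's NodeInfo.
type node struct {
    name   string
    cpu    float64 // current CPU utilization as a fraction (0..1)
    maxCPU int
}

// filterFunc reports whether the node may run the QEMU.
type filterFunc func(n node) bool

// scoreFunc returns a score for the node; higher is better.
type scoreFunc func(n node) int64

// selectNode runs every filter, then sums the scores of the
// surviving nodes and returns the one with the highest total.
func selectNode(nodes []node, filters []filterFunc, scorers []scoreFunc) (node, error) {
    var best node
    bestScore := int64(-1)
    for _, n := range nodes {
        passed := true
        for _, f := range filters {
            if !f(n) {
                passed = false
                break
            }
        }
        if !passed {
            continue // node was filtered out
        }
        var total int64
        for _, s := range scorers {
            total += s(n)
        }
        if total > bestScore {
            best, bestScore = n, total
        }
    }
    if bestScore < 0 {
        return node{}, fmt.Errorf("no node passed all filters")
    }
    return best, nil
}

func main() {
    nodes := []node{{"node1", 0.8, 8}, {"node2", 0.2, 8}}
    filters := []filterFunc{func(n node) bool { return n.maxCPU >= 4 }}
    scorers := []scoreFunc{func(n node) int64 { return int64((1 - n.cpu) * 100) }}
    chosen, _ := selectNode(nodes, filters, scorers)
    fmt.Println("selected:", chosen.name) // prints: selected: node2
}
```

The real scheduler additionally threads a `CycleState` and the `VirtualMachineCreateOptions` through every plugin call, as the plugin sources later in this commit show.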

## How to specify vmid
qemu-scheduler reads the context and looks for keys registered to the scheduler. If the context carries a value for any registered key, qemu-scheduler uses the plugin that matches that key.

### Range Plugin
You can specify a vmid range in `(start id)-(end id)` format.
```sh
key: vmid.qemu-scheduler/range
value (example): 100-150
```

### Regex Plugin
```sh
key: vmid.qemu-scheduler/regex
value (example): (12[0-9]|130)
```
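The vmid and node plugins read these key/value pairs from the scheduling context; for instance, `findNodeRegex` later in this commit calls `ctx.Value(framework.CtxKey(...))`. The sketch below illustrates that pattern in a self-contained way: the `ctxKey` type and the `parseRange` helper are hypothetical stand-ins, not qemu-scheduler APIs.

```go
package main

import (
    "context"
    "fmt"
    "strconv"
    "strings"
)

// ctxKey is a hypothetical stand-in for the scheduler's context key type.
type ctxKey string

const rangeKey ctxKey = "vmid.qemu-scheduler/range"

// parseRange parses a "(start id)-(end id)" value such as "100-150".
func parseRange(v string) (start, end int, err error) {
    parts := strings.SplitN(v, "-", 2)
    if len(parts) != 2 {
        return 0, 0, fmt.Errorf("invalid range %q", v)
    }
    if start, err = strconv.Atoi(parts[0]); err != nil {
        return 0, 0, err
    }
    if end, err = strconv.Atoi(parts[1]); err != nil {
        return 0, 0, err
    }
    return start, end, nil
}

func main() {
    // in CAPPX this value presumably comes from the ProxmoxMachine annotation
    ctx := context.WithValue(context.Background(), rangeKey, "100-150")

    if v, ok := ctx.Value(rangeKey).(string); ok {
        start, end, _ := parseRange(v)
        fmt.Printf("vmid must be between %d and %d\n", start, end) // 100 and 150
    }
}
```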
@@ -43,5 +63,5 @@ spec:
  template:
    metadata:
      annotations:
-        vmid.qemu-scheduler/range: 100-150 # this annotation will be propagated to your ProxmoxMachine via MachineSet
+        node.qemu-scheduler/regex: node[0-9]+ # this annotation will be propagated to your ProxmoxMachine via MachineSet
```

cloud/scheduler/framework/cycle_state.go

Lines changed: 2 additions & 2 deletions
@@ -41,10 +41,10 @@ func (c *CycleState) QEMU() *api.VirtualMachine {
    return c.result.instance.VM
}

-func (c *CycleState) UpdateState(completed bool, err error, result *SchedulerResult) {
+func (c *CycleState) UpdateState(completed bool, err error, result SchedulerResult) {
    c.completed = completed
    c.err = err
-    c.result = *result
+    c.result = result
}

func NewSchedulerResult(vmid int, node string, instance *proxmox.VirtualMachine) SchedulerResult {
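With this change, `UpdateState` takes the `SchedulerResult` by value, so a caller can pass the value returned by `NewSchedulerResult` (visible in the surrounding context) directly, for example `state.UpdateState(true, nil, NewSchedulerResult(vmid, nodeName, vm))` (variable names illustrative), rather than passing a pointer that is immediately dereferenced.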

cloud/scheduler/framework/types.go

Lines changed: 4 additions & 0 deletions
@@ -77,6 +77,10 @@ func (n NodeInfo) Node() *api.Node {
    return n.node
}

+func (n NodeInfo) QEMUs() []*api.VirtualMachine {
+    return n.qemus
+}
+
// NodeScoreList declares a list of nodes and their scores.
type NodeScoreList []NodeScore
cloud/scheduler/plugins/names/names.go

Lines changed: 12 additions & 3 deletions
@@ -3,15 +3,24 @@ package names
// node plugins
const (
    // filter plugins
+    // filter by node name
    NodeName = "NodeName"
+    // filter by node name regex
+    NodeRegex = "NodeRegex"
+    // filter by cpu overcommit ratio
+    CPUOvercommit = "CPUOvercommit"
+    // filter by memory overcommit ratio
+    MemoryOvercommit = "MemoryOvercommit"

    // score plugins
+    // random score
    Random = "Random"
+    // resource utilization score
+    NodeResource = "NodeResource"

    // vmid plugins
+    // select by range
    Range = "Range"
+    // select by regex
    Regex = "Regex"
)
Lines changed: 32 additions & 0 deletions
@@ -0,0 +1,32 @@
package noderesource

import (
    "context"

    "github.com/sp-yduck/cluster-api-provider-proxmox/cloud/scheduler/framework"
    "github.com/sp-yduck/cluster-api-provider-proxmox/cloud/scheduler/plugins/names"
    "github.com/sp-yduck/proxmox-go/api"
)

type NodeResource struct{}

var _ framework.NodeScorePlugin = &NodeResource{}

const (
    Name = names.NodeResource
)

func (pl *NodeResource) Name() string {
    return Name
}

// score = 1/(cpu/maxcpu * mem/maxmem)
func (pl *NodeResource) Score(ctx context.Context, state *framework.CycleState, _ api.VirtualMachineCreateOptions, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
    cpu := nodeInfo.Node().Cpu
    maxCPU := nodeInfo.Node().MaxCpu
    mem := nodeInfo.Node().Mem
    maxMem := nodeInfo.Node().MaxMem
    // convert before dividing so the memory ratio is not truncated to zero by integer division
    u := (cpu / float32(maxCPU)) * (float32(mem) / float32(maxMem))
    score := int64(1 / u)
    return score, nil
}
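As a worked example of this score: a node at 20% CPU utilization whose memory is half used scores 1/(0.2 × 0.5) = 10, while a busier node at 80% CPU and 90% memory scores 1/(0.8 × 0.9) ≈ 1.4, truncated to 1, so the less loaded node wins.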
Lines changed: 48 additions & 0 deletions
@@ -0,0 +1,48 @@
package overcommit

import (
    "context"

    "github.com/sp-yduck/proxmox-go/api"

    "github.com/sp-yduck/cluster-api-provider-proxmox/cloud/scheduler/framework"
    "github.com/sp-yduck/cluster-api-provider-proxmox/cloud/scheduler/plugins/names"
)

type CPUOvercommit struct{}

var _ framework.NodeFilterPlugin = &CPUOvercommit{}

const (
    CPUOvercommitName         = names.CPUOvercommit
    defaultCPUOvercommitRatio = 4
)

func (pl *CPUOvercommit) Name() string {
    return CPUOvercommitName
}

// filter by cpu overcommit ratio
func (pl *CPUOvercommit) Filter(ctx context.Context, _ *framework.CycleState, config api.VirtualMachineCreateOptions, nodeInfo *framework.NodeInfo) *framework.Status {
    cpu := sumCPUs(nodeInfo.QEMUs())
    maxCPU := nodeInfo.Node().MaxCpu
    sockets := config.Sockets
    if sockets == 0 {
        sockets = 1
    }
    ratio := float32(cpu+config.Cores*sockets) / float32(maxCPU)
    if ratio > defaultCPUOvercommitRatio {
        status := framework.NewStatus()
        status.SetCode(1)
        return status
    }
    return &framework.Status{}
}

func sumCPUs(qemus []*api.VirtualMachine) int {
    var result int
    for _, q := range qemus {
        result += q.Cpus
    }
    return result
}
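As a worked example: on a node with `MaxCpu = 16` whose existing QEMUs already have 40 vCPUs assigned, a request for 4 cores on 2 sockets gives a ratio of (40 + 8) / 16 = 3.0, which passes; any request that pushes the node past 64 assigned vCPUs in total would exceed the default ratio of 4 and be filtered out.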
Lines changed: 44 additions & 0 deletions
@@ -0,0 +1,44 @@
package overcommit

import (
    "context"

    "github.com/sp-yduck/proxmox-go/api"

    "github.com/sp-yduck/cluster-api-provider-proxmox/cloud/scheduler/framework"
    "github.com/sp-yduck/cluster-api-provider-proxmox/cloud/scheduler/plugins/names"
)

type MemoryOvercommit struct{}

var _ framework.NodeFilterPlugin = &MemoryOvercommit{}

const (
    MemoryOvercommitName         = names.MemoryOvercommit
    defaultMemoryOvercommitRatio = 1
)

func (pl *MemoryOvercommit) Name() string {
    return MemoryOvercommitName
}

// filter by memory overcommit ratio
func (pl *MemoryOvercommit) Filter(ctx context.Context, _ *framework.CycleState, config api.VirtualMachineCreateOptions, nodeInfo *framework.NodeInfo) *framework.Status {
    mem := sumMems(nodeInfo.QEMUs())
    maxMem := nodeInfo.Node().MaxMem
    // config.Memory is in MiB, so convert it to bytes to match the node's MaxMem
    ratio := float32(mem+1024*1024*config.Memory) / float32(maxMem)
    if ratio >= defaultMemoryOvercommitRatio {
        status := framework.NewStatus()
        status.SetCode(1)
        return status
    }
    return &framework.Status{}
}

func sumMems(qemus []*api.VirtualMachine) int {
    var result int
    for _, q := range qemus {
        result += q.MaxMem
    }
    return result
}
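As a worked example: on a node with 64 GiB of `MaxMem` whose existing QEMUs' combined `MaxMem` is 48 GiB, a request for 8192 MiB (8 GiB) gives a ratio of 56/64 = 0.875 and passes, while a request for 16384 MiB gives 64/64 = 1.0 and is rejected, since the default ratio of 1 allows no memory overcommit.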
Lines changed: 54 additions & 0 deletions
@@ -0,0 +1,54 @@
package regex

import (
    "context"
    "fmt"
    "regexp"

    "github.com/sp-yduck/proxmox-go/api"

    "github.com/sp-yduck/cluster-api-provider-proxmox/cloud/scheduler/framework"
    "github.com/sp-yduck/cluster-api-provider-proxmox/cloud/scheduler/plugins/names"
)

type NodeRegex struct{}

var _ framework.NodeFilterPlugin = &NodeRegex{}

const (
    NodeRegexName = names.NodeRegex
    NodeRegexKey  = "node.qemu-scheduler/regex"
)

func (pl *NodeRegex) Name() string {
    return NodeRegexName
}

// regex is specified in ctx value (key=node.qemu-scheduler/regex)
func (pl *NodeRegex) Filter(ctx context.Context, _ *framework.CycleState, config api.VirtualMachineCreateOptions, nodeInfo *framework.NodeInfo) *framework.Status {
    reg, err := findNodeRegex(ctx)
    if err != nil {
        // no usable regex in the context: do not filter this node
        return &framework.Status{}
    }
    if !reg.MatchString(nodeInfo.Node().Node) {
        status := framework.NewStatus()
        status.SetCode(1)
        return status
    }
    return &framework.Status{}
}

// specify available node name as regex
// example: node.qemu-scheduler/regex=node[0-9]+
func findNodeRegex(ctx context.Context) (*regexp.Regexp, error) {
    value := ctx.Value(framework.CtxKey(NodeRegexKey))
    if value == nil {
        return nil, fmt.Errorf("no node name regex is specified")
    }
    reg, err := regexp.Compile(fmt.Sprintf("%s", value))
    if err != nil {
        return nil, err
    }
    return reg, nil
}
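For example, with the annotation `node.qemu-scheduler/regex: node[0-9]+` shown in the scheduler README above, nodes named `node1` or `node23` pass this filter, while a node named `pve` does not match and is rejected with status code 1.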
