Skip to content

Commit ef8f925

Browse files
authored
Merge pull request #125 from sp-yduck/feature/plugin-config
Feature/plugin config
2 parents 3d65695 + 3693d03 commit ef8f925

File tree

9 files changed

+297
-43
lines changed

9 files changed

+297
-43
lines changed

Makefile

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -118,7 +118,7 @@ build-e2e-image: ## Build cappx image to be used for e2e test
118118

119119
USE_EXISTING_CLUSTER := false
120120
.PHONY: e2e
121-
e2e: generate-e2e-templates build-e2e-image cleanup-e2e-artifacts ## Run e2e test
121+
e2e: generate-e2e-templates build-e2e-image cleanup-e2e-artifacts $(KUBECTL) ## Run e2e test
122122
go test $(E2E_DIR)/... -v \
123123
-timeout=$(GINKGO_TIMEOUT) \
124124
--e2e.artifacts-folder=$(E2E_DIR) \
@@ -184,7 +184,7 @@ uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified
184184
.PHONY: deploy
185185
deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
186186
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
187-
$(KUSTOMIZE) build config/default | kubectl diff -f -
187+
$(KUSTOMIZE) build config/default | kubectl apply -f -
188188

189189
.PHONY: undeploy
190190
undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.

cloud/scheduler/README.md

Lines changed: 30 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,12 @@ Basic flow of the node selection process is `filter => score => select one node
88

99
### Filter Plugins
1010

11-
Filter plugins filter the node based on nodename, overcommit ratio etc.
11+
Filter plugins filter the nodes based on node name, overcommit ratio, etc., so that we can avoid running QEMU VMs on undesired Proxmox nodes.
12+
13+
- [NodeName plugin](./plugins/nodename/node_name.go) (pass the node matching specified node name)
14+
- [CPUOvercommit plugin](./plugins/overcommit/cpu_overcommit.go) (pass the node that has enough cpu against running vm)
15+
- [MemoryOvercommit plugin](./plugins/overcommit/memory_overcommit.go) (pass the node that has enough memory against running vm)
16+
- [NodeRegex plugin](./plugins/regex/node_regex.go) (pass the node matching specified regex)
1217

1318
#### regex plugin
1419

@@ -20,11 +25,17 @@ value(example): node[0-9]+
2025

2126
### Score Plugins
2227

23-
Score plugins score the nodes based on resource etc.
28+
Score plugins score the nodes based on resources etc., so that we can run QEMU VMs on the most appropriate Proxmox node.
29+
30+
- [NodeResource plugin](./plugins/noderesource/node_resrouce.go) (nodes with more resources have higher scores)
31+
- [Random plugin](./plugins/random/random.go) (disabled by default; just a reference implementation of a score plugin)
2432

2533
## How to specify vmid
2634
qemu-scheduler reads the context and finds keys registered to the scheduler. If the context has a value for any registered key, qemu-scheduler uses the plugin that matches that key.
2735

36+
- [Range plugin](./plugins/idrange/idrange.go) (select the minimum available vmid from the specified id range)
37+
- [VMIDRegex plugin](./plugins/regex/vmid_regex.go) (select the minimum available vmid matching the specified regex)
38+
2839
### Range Plugin
2940
You can specify vmid range with `(start id)-(end id)` format.
3041
```sh
@@ -64,4 +75,21 @@ spec:
6475
metadata:
6576
annotations:
6677
node.qemu-scheduler/regex: node[0-9]+ # this annotation will be propagated to your ProxmoxMachine via MachineSet
78+
```
79+
80+
## How to configure (or disable/enable) specific Plugins
81+
82+
By default, all the plugins are enabled. You can disable specific plugins via plugin-config. For CAPPX, check the example ConfigMap [here](../../config/manager/manager.yaml).
83+
```sh
84+
# example plugin-config.yaml
85+
86+
# plugin type name (scores, filters, vmids)
87+
filters:
88+
CPUOvercommit:
89+
enable: false # disable
90+
MemoryOvercommit:
91+
enable: true # enable (can be omitted)
92+
vmids:
93+
Regex:
94+
enable: false # disable
6795
```
Lines changed: 91 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,10 @@
11
package plugins
22

33
import (
4+
"os"
5+
6+
"gopkg.in/yaml.v3"
7+
48
"github.com/sp-yduck/cluster-api-provider-proxmox/cloud/scheduler/framework"
59
"github.com/sp-yduck/cluster-api-provider-proxmox/cloud/scheduler/plugins/idrange"
610
"github.com/sp-yduck/cluster-api-provider-proxmox/cloud/scheduler/plugins/nodename"
@@ -9,25 +13,105 @@ import (
913
"github.com/sp-yduck/cluster-api-provider-proxmox/cloud/scheduler/plugins/regex"
1014
)
1115

12-
func NewNodeFilterPlugins() []framework.NodeFilterPlugin {
13-
return []framework.NodeFilterPlugin{
16+
type PluginConfigs struct {
17+
FilterPlugins map[string]PluginConfig `yaml:"filters,omitempty"`
18+
ScorePlugins map[string]PluginConfig `yaml:"scores,omitempty"`
19+
VMIDPlugins map[string]PluginConfig `yaml:"vmids,omitempty"`
20+
}
21+
22+
type PluginConfig struct {
23+
Enable bool `yaml:"enable,omitempty"`
24+
Config map[string]interface{} `yaml:"config,omitempty"`
25+
}
26+
27+
type PluginRegistry struct {
28+
filterPlugins []framework.NodeFilterPlugin
29+
scorePlugins []framework.NodeScorePlugin
30+
vmidPlugins []framework.VMIDPlugin
31+
}
32+
33+
func (r *PluginRegistry) FilterPlugins() []framework.NodeFilterPlugin {
34+
return r.filterPlugins
35+
}
36+
37+
func (r *PluginRegistry) ScorePlugins() []framework.NodeScorePlugin {
38+
return r.scorePlugins
39+
}
40+
41+
func (r *PluginRegistry) VMIDPlugins() []framework.VMIDPlugin {
42+
return r.vmidPlugins
43+
}
44+
45+
func NewRegistry(configs PluginConfigs) PluginRegistry {
46+
r := PluginRegistry{
47+
filterPlugins: NewNodeFilterPlugins(configs.FilterPlugins),
48+
scorePlugins: NewNodeScorePlugins(configs.ScorePlugins),
49+
vmidPlugins: NewVMIDPlugins(configs.VMIDPlugins),
50+
}
51+
return r
52+
}
53+
54+
func NewNodeFilterPlugins(config map[string]PluginConfig) []framework.NodeFilterPlugin {
55+
pls := []framework.NodeFilterPlugin{
1456
&nodename.NodeName{},
1557
&overcommit.CPUOvercommit{},
1658
&overcommit.MemoryOvercommit{},
1759
&regex.NodeRegex{},
1860
}
61+
plugins := []framework.NodeFilterPlugin{}
62+
for _, pl := range pls {
63+
c, ok := config[pl.Name()]
64+
if ok && !c.Enable {
65+
continue
66+
}
67+
plugins = append(plugins, pl)
68+
}
69+
return plugins
1970
}
2071

21-
func NewNodeScorePlugins() []framework.NodeScorePlugin {
22-
return []framework.NodeScorePlugin{
23-
// &random.Random{},
72+
func NewNodeScorePlugins(config map[string]PluginConfig) []framework.NodeScorePlugin {
73+
pls := []framework.NodeScorePlugin{
2474
&noderesource.NodeResource{},
2575
}
76+
plugins := []framework.NodeScorePlugin{}
77+
for _, pl := range pls {
78+
c, ok := config[pl.Name()]
79+
if ok && !c.Enable {
80+
continue
81+
}
82+
plugins = append(plugins, pl)
83+
}
84+
return plugins
2685
}
2786

28-
func NewVMIDPlugins() []framework.VMIDPlugin {
29-
return []framework.VMIDPlugin{
87+
func NewVMIDPlugins(config map[string]PluginConfig) []framework.VMIDPlugin {
88+
pls := []framework.VMIDPlugin{
3089
&idrange.Range{},
3190
&regex.Regex{},
3291
}
92+
plugins := []framework.VMIDPlugin{}
93+
for _, pl := range pls {
94+
c, ok := config[pl.Name()]
95+
if ok && !c.Enable {
96+
continue
97+
}
98+
plugins = append(plugins, pl)
99+
}
100+
return plugins
101+
}
102+
103+
// Read config file and unmarshal it to PluginConfig type
104+
func GetPluginConfigFromFile(path string) (PluginConfigs, error) {
105+
var config PluginConfigs
106+
if path == "" {
107+
return config, nil
108+
}
109+
b, err := os.ReadFile(path)
110+
if err != nil {
111+
return config, err
112+
}
113+
if err := yaml.Unmarshal(b, &config); err != nil {
114+
return config, err
115+
}
116+
return config, nil
33117
}
Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,69 @@
1+
package plugins_test
2+
3+
import (
4+
"os"
5+
"testing"
6+
7+
. "github.com/onsi/ginkgo/v2"
8+
. "github.com/onsi/gomega"
9+
10+
"github.com/sp-yduck/cluster-api-provider-proxmox/cloud/scheduler/plugins"
11+
)
12+
13+
func TestPlugins(t *testing.T) {
14+
RegisterFailHandler(Fail)
15+
RunSpecs(t, "Plugins Suite")
16+
}
17+
18+
var _ = Describe("GetPluginConfigFromFile", Label("unit", "scheduler"), func() {
19+
path := "./test-plugin-config.yaml"
20+
BeforeEach(func() {
21+
content := `scores:
22+
Random:
23+
enable: false`
24+
err := stringToFile(content, path)
25+
Expect(err).ToNot(HaveOccurred())
26+
})
27+
28+
AfterEach(func() {
29+
err := rm(path)
30+
Expect(err).NotTo(HaveOccurred())
31+
})
32+
33+
Context("with empty file path", func() {
34+
path := ""
35+
It("should not error", func() {
36+
config, err := plugins.GetPluginConfigFromFile(path)
37+
Expect(err).NotTo(HaveOccurred())
38+
Expect(config).To(Equal(plugins.PluginConfigs{}))
39+
})
40+
})
41+
42+
Context("with non-empty file path", func() {
43+
It("should not error", func() {
44+
config, err := plugins.GetPluginConfigFromFile(path)
45+
Expect(err).NotTo(HaveOccurred())
46+
scores := map[string]plugins.PluginConfig{}
47+
scores["Random"] = plugins.PluginConfig{Enable: false}
48+
Expect(config).To(Equal(plugins.PluginConfigs{ScorePlugins: scores}))
49+
})
50+
})
51+
52+
Context("with wrong file path", func() {
53+
It("shold error", func() {
54+
path := "./wrong-plugin-config.yaml"
55+
config, err := plugins.GetPluginConfigFromFile(path)
56+
Expect(err).To(HaveOccurred())
57+
Expect(config).To(Equal(plugins.PluginConfigs{}))
58+
})
59+
})
60+
})
61+
62+
func stringToFile(str string, path string) error {
63+
b := []byte(str)
64+
return os.WriteFile(path, b, 0666)
65+
}
66+
67+
func rm(path string) error {
68+
return os.Remove(path)
69+
}

cloud/scheduler/scheduler.go

Lines changed: 21 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -35,9 +35,15 @@ type Manager struct {
3535
}
3636

3737
// return manager with initialized scheduler-table
38-
func NewManager(params SchedulerParams) *Manager {
38+
func NewManager(params SchedulerParams) (*Manager, error) {
3939
table := make(map[schedulerID]*Scheduler)
40-
return &Manager{ctx: context.Background(), params: params, table: table}
40+
config, err := plugins.GetPluginConfigFromFile(params.PluginConfigFile)
41+
if err != nil {
42+
return nil, fmt.Errorf("failed to read plugin config: %v", err)
43+
}
44+
params.pluginconfigs = config
45+
params.Logger.Info(fmt.Sprintf("load plugin config: %v", config))
46+
return &Manager{ctx: context.Background(), params: params, table: table}, nil
4147
}
4248

4349
// return new/existing scheduler
@@ -70,9 +76,7 @@ func (m *Manager) NewScheduler(client *proxmox.Service, opts ...SchedulerOption)
7076
client: client,
7177
schedulingQueue: queue.New(),
7278

73-
filterPlugins: plugins.NewNodeFilterPlugins(),
74-
scorePlugins: plugins.NewNodeScorePlugins(),
75-
vmidPlugins: plugins.NewVMIDPlugins(),
79+
registry: plugins.NewRegistry(m.params.PluginConfigs()),
7680

7781
resultMap: make(map[string]chan *framework.CycleState),
7882
logger: m.params.Logger.WithValues("Name", "qemu-scheduler"),
@@ -122,9 +126,7 @@ type Scheduler struct {
122126
client *proxmox.Service
123127
schedulingQueue *queue.SchedulingQueue
124128

125-
filterPlugins []framework.NodeFilterPlugin
126-
scorePlugins []framework.NodeScorePlugin
127-
vmidPlugins []framework.VMIDPlugin
129+
registry plugins.PluginRegistry
128130

129131
// to do : cache
130132

@@ -144,6 +146,14 @@ type Scheduler struct {
144146

145147
type SchedulerParams struct {
146148
Logger logr.Logger
149+
150+
// file path for pluginConfig
151+
PluginConfigFile string
152+
pluginconfigs plugins.PluginConfigs
153+
}
154+
155+
func (p *SchedulerParams) PluginConfigs() plugins.PluginConfigs {
156+
return p.pluginconfigs
147157
}
148158

149159
type schedulerID struct {
@@ -320,7 +330,7 @@ func (s *Scheduler) RunFilterPlugins(ctx context.Context, state *framework.Cycle
320330
}
321331
for _, nodeInfo := range nodeInfos {
322332
status := framework.NewStatus()
323-
for _, pl := range s.filterPlugins {
333+
for _, pl := range s.registry.FilterPlugins() {
324334
status = pl.Filter(ctx, state, config, nodeInfo)
325335
if !status.IsSuccess() {
326336
status.SetFailedPlugin(pl.Name())
@@ -344,7 +354,7 @@ func (s *Scheduler) RunScorePlugins(ctx context.Context, state *framework.CycleS
344354
return nil, status
345355
}
346356
for index, nodeInfo := range nodeInfos {
347-
for _, pl := range s.scorePlugins {
357+
for _, pl := range s.registry.ScorePlugins() {
348358
score, status := pl.Score(ctx, state, config, nodeInfo)
349359
if !status.IsSuccess() {
350360
return nil, status
@@ -379,7 +389,7 @@ func selectHighestScoreNode(scoreList framework.NodeScoreList) (string, error) {
379389
}
380390

381391
func (s *Scheduler) RunVMIDPlugins(ctx context.Context, state *framework.CycleState, config api.VirtualMachineCreateOptions, nextid int, usedID map[int]bool) (int, error) {
382-
for _, pl := range s.vmidPlugins {
392+
for _, pl := range s.registry.VMIDPlugins() {
383393
key := pl.PluginKey()
384394
value := ctx.Value(key)
385395
if value != nil {

0 commit comments

Comments
 (0)