mirror of
https://github.com/jominjun94/k8s-cpu-limit-check-operator.git
synced 2026-01-29 21:55:38 +00:00
feat: add cpu-reaper-operator source code
This commit is contained in:
131
internal/controller/cpureaperpolicy_controller.go
Normal file
131
internal/controller/cpureaperpolicy_controller.go
Normal file
@@ -0,0 +1,131 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
reaperv1alpha1 "github.com/jominjun94/k8s-cpu-limit-check-operator/api/v1alpha1"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metricsv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
)
|
||||
|
||||
// CpuReaperPolicyReconciler reconciles CpuReaperPolicy objects: it watches
// the pods matched by each policy's selector and deletes any pod whose CPU
// usage stays at or above the policy threshold for the configured duration.
type CpuReaperPolicyReconciler struct {
	client.Client
	Scheme *runtime.Scheme
	// hotPods records, per pod UID, the time the pod was first observed
	// above the CPU threshold. Lazily initialized in Reconcile; entries are
	// removed when a pod cools down, stops running, or is deleted.
	// NOTE(review): not mutex-protected — safe only while the controller
	// runs with the default single reconcile worker; confirm
	// MaxConcurrentReconciles stays 1 before raising concurrency.
	hotPods map[types.UID]time.Time
}
|
||||
|
||||
func (r *CpuReaperPolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
logger := log.FromContext(ctx)
|
||||
|
||||
if r.hotPods == nil {
|
||||
r.hotPods = make(map[types.UID]time.Time)
|
||||
}
|
||||
|
||||
var policy reaperv1alpha1.CpuReaperPolicy
|
||||
if err := r.Get(ctx, req.NamespacedName, &policy); err != nil {
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
selector, err := metav1.LabelSelectorAsSelector(policy.Spec.PodSelector)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
var pods corev1.PodList
|
||||
if err := r.List(ctx, &pods, client.InNamespace(policy.Namespace), client.MatchingLabelsSelector{Selector: selector}); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
logger.Info("Pod list", "count", len(pods.Items))
|
||||
|
||||
for _, pod := range pods.Items {
|
||||
if pod.Status.Phase != corev1.PodRunning {
|
||||
delete(r.hotPods, pod.UID)
|
||||
continue
|
||||
}
|
||||
|
||||
var metrics metricsv1beta1.PodMetrics
|
||||
if err := r.Get(ctx, types.NamespacedName{
|
||||
Name: pod.Name, Namespace: pod.Namespace,
|
||||
}, &metrics); err != nil {
|
||||
logger.Info("Failed to get pod metrics", "pod", pod.Name, "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
used := cpuUsageMilli(&metrics)
|
||||
limit, ok := cpuLimitMilli(&pod)
|
||||
if !ok || limit == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
percent := int(math.Round(float64(used) * 100 / float64(limit)))
|
||||
|
||||
logger.Info("CPU check",
|
||||
"pod", pod.Name,
|
||||
"usedMilli", used,
|
||||
"limitMilli", limit,
|
||||
"percent", percent,
|
||||
"threshold", policy.Spec.ThresholdPercent,
|
||||
)
|
||||
|
||||
if percent >= policy.Spec.ThresholdPercent {
|
||||
start, exists := r.hotPods[pod.UID]
|
||||
if !exists {
|
||||
r.hotPods[pod.UID] = time.Now()
|
||||
continue
|
||||
}
|
||||
|
||||
if time.Since(start) >= time.Duration(policy.Spec.ForSeconds)*time.Second {
|
||||
logger.Info("CPU limit exceeded → deleting pod", "pod", pod.Name)
|
||||
_ = r.Delete(ctx, &pod)
|
||||
delete(r.hotPods, pod.UID)
|
||||
}
|
||||
} else {
|
||||
delete(r.hotPods, pod.UID)
|
||||
}
|
||||
}
|
||||
|
||||
return ctrl.Result{
|
||||
RequeueAfter: time.Duration(policy.Spec.CheckIntervalSeconds) * time.Second,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func cpuUsageMilli(m *metricsv1beta1.PodMetrics) int64 {
|
||||
var total int64
|
||||
for _, c := range m.Containers {
|
||||
total += c.Usage.Cpu().MilliValue()
|
||||
}
|
||||
return total
|
||||
}
|
||||
|
||||
func cpuLimitMilli(p *corev1.Pod) (int64, bool) {
|
||||
var total int64
|
||||
for _, c := range p.Spec.Containers {
|
||||
if limit, ok := c.Resources.Limits[corev1.ResourceCPU]; ok {
|
||||
total += limit.MilliValue()
|
||||
} else if req, ok := c.Resources.Requests[corev1.ResourceCPU]; ok {
|
||||
total += req.MilliValue()
|
||||
}
|
||||
}
|
||||
if total == 0 {
|
||||
return 0, false
|
||||
}
|
||||
return total, true
|
||||
}
|
||||
|
||||
func (r *CpuReaperPolicyReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&reaperv1alpha1.CpuReaperPolicy{}).
|
||||
Complete(r)
|
||||
}
|
||||
84
internal/controller/cpureaperpolicy_controller_test.go
Normal file
84
internal/controller/cpureaperpolicy_controller_test.go
Normal file
@@ -0,0 +1,84 @@
|
||||
/*
|
||||
Copyright 2025.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
reaperv1alpha1 "github.com/jominjun94/k8s-cpu-limit-check-operator/api/v1alpha1"
|
||||
)
|
||||
|
||||
// Integration spec for the CpuReaperPolicy controller. It relies on the
// envtest cluster and the package-level k8sClient bootstrapped in
// suite_test.go's BeforeSuite.
var _ = Describe("CpuReaperPolicy Controller", func() {
	Context("When reconciling a resource", func() {
		const resourceName = "test-resource"

		ctx := context.Background()

		// Identity of the test CpuReaperPolicy created before each spec.
		typeNamespacedName := types.NamespacedName{
			Name:      resourceName,
			Namespace: "default", // TODO(user):Modify as needed
		}
		cpureaperpolicy := &reaperv1alpha1.CpuReaperPolicy{}

		BeforeEach(func() {
			By("creating the custom resource for the Kind CpuReaperPolicy")
			// Create the policy only if it does not already exist, so
			// repeated setup stays idempotent.
			err := k8sClient.Get(ctx, typeNamespacedName, cpureaperpolicy)
			if err != nil && errors.IsNotFound(err) {
				resource := &reaperv1alpha1.CpuReaperPolicy{
					ObjectMeta: metav1.ObjectMeta{
						Name:      resourceName,
						Namespace: "default",
					},
					// TODO(user): Specify other spec details if needed.
				}
				Expect(k8sClient.Create(ctx, resource)).To(Succeed())
			}
		})

		AfterEach(func() {
			// TODO(user): Cleanup logic after each test, like removing the resource instance.
			resource := &reaperv1alpha1.CpuReaperPolicy{}
			err := k8sClient.Get(ctx, typeNamespacedName, resource)
			Expect(err).NotTo(HaveOccurred())

			By("Cleanup the specific resource instance CpuReaperPolicy")
			Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
		})
		It("should successfully reconcile the resource", func() {
			By("Reconciling the created resource")
			// hotPods is intentionally left nil here: Reconcile lazily
			// initializes it on first use.
			controllerReconciler := &CpuReaperPolicyReconciler{
				Client: k8sClient,
				Scheme: k8sClient.Scheme(),
			}

			_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
				NamespacedName: typeNamespacedName,
			})
			Expect(err).NotTo(HaveOccurred())
			// TODO(user): Add more specific assertions depending on your controller's reconciliation logic.
			// Example: If you expect a certain status condition after reconciliation, verify it here.
		})
	})
})
|
||||
90
internal/controller/suite_test.go
Normal file
90
internal/controller/suite_test.go
Normal file
@@ -0,0 +1,90 @@
|
||||
/*
|
||||
Copyright 2025.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
||||
|
||||
reaperv1alpha1 "github.com/jominjun94/k8s-cpu-limit-check-operator/api/v1alpha1"
|
||||
//+kubebuilder:scaffold:imports
|
||||
)
|
||||
|
||||
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
|
||||
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
|
||||
|
||||
var cfg *rest.Config
|
||||
var k8sClient client.Client
|
||||
var testEnv *envtest.Environment
|
||||
|
||||
// TestControllers is the standard `go test` entry point; it wires Gomega
// failures into Ginkgo and runs the whole controller spec suite.
func TestControllers(t *testing.T) {
	RegisterFailHandler(Fail)

	RunSpecs(t, "Controller Suite")
}
|
||||
|
||||
// BeforeSuite boots a local envtest control plane, installs the project's
// CRDs, registers the API scheme, and builds the shared k8sClient used by
// every spec in this package.
var _ = BeforeSuite(func() {
	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))

	By("bootstrapping test environment")
	testEnv = &envtest.Environment{
		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "config", "crd", "bases")},
		ErrorIfCRDPathMissing: true,

		// The BinaryAssetsDirectory is only required if you want to run the tests directly
		// without call the makefile target test. If not informed it will look for the
		// default path defined in controller-runtime which is /usr/local/kubebuilder/.
		// Note that you must have the required binaries setup under the bin directory to perform
		// the tests directly. When we run make test it will be setup and used automatically.
		BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s",
			fmt.Sprintf("1.29.0-%s-%s", runtime.GOOS, runtime.GOARCH)),
	}

	var err error
	// cfg is defined in this file globally.
	cfg, err = testEnv.Start()
	Expect(err).NotTo(HaveOccurred())
	Expect(cfg).NotTo(BeNil())

	// Register the CpuReaperPolicy types so the client can decode them.
	err = reaperv1alpha1.AddToScheme(scheme.Scheme)
	Expect(err).NotTo(HaveOccurred())

	//+kubebuilder:scaffold:scheme

	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
	Expect(err).NotTo(HaveOccurred())
	Expect(k8sClient).NotTo(BeNil())

})
|
||||
|
||||
// AfterSuite stops the envtest control plane started in BeforeSuite.
var _ = AfterSuite(func() {
	By("tearing down the test environment")
	err := testEnv.Stop()
	Expect(err).NotTo(HaveOccurred())
})
|
||||
Reference in New Issue
Block a user