feat: some drafting
Signed-off-by: Oliver Bähler <[email protected]>
oliverbaehler committed Feb 19, 2025
1 parent 0a09ca8 commit 57602a3
Showing 10 changed files with 188 additions and 28 deletions.
3 changes: 2 additions & 1 deletion api/v1beta2/globalresourcequota_func.go
@@ -7,9 +7,10 @@ import (
 	"fmt"
 	"sort"
 
-	"github.com/projectcapsule/capsule/pkg/api"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
+
+	"github.com/projectcapsule/capsule/pkg/api"
 )
 
 func (g *GlobalResourceQuota) GetQuotaSpace(index api.Name) (corev1.ResourceList, error) {
@@ -294,6 +294,7 @@ webhooks:
       - DELETE
     resources:
       - resourcequotas
+      - resourcequotas/status
     scope: 'Namespaced'
   sideEffects: None
   namespaceSelector:
38 changes: 23 additions & 15 deletions controllers/globalquota/resourcequotas.go
@@ -162,23 +162,39 @@ func (r *Manager) syncResourceQuotas(
 		}
 	}
 
-	//nolint:nestif
+	return SyncResourceQuotas(ctx, r.Client, quota, matchingNamespaces)
+}
+
+// Synchronize resource quotas in all the given namespaces (routines)
+func SyncResourceQuotas(
+	ctx context.Context,
+	c client.Client,
+	quota *capsulev1beta2.GlobalResourceQuota,
+	namespaces []string,
+) (err error) {
 	group := new(errgroup.Group)
 
 	// Sync resource quotas for matching namespaces
-	for _, ns := range matchingNamespaces {
+	for _, ns := range namespaces {
 		namespace := ns
 
 		group.Go(func() error {
-			return r.syncResourceQuota(ctx, quota, namespace)
+			return SyncResourceQuota(ctx, c, quota, namespace)
 		})
 	}
 
 	return group.Wait()
 }
 
 // Synchronize a single resourcequota
 //
 //nolint:nakedret
-func (r *Manager) syncResourceQuota(ctx context.Context, quota *capsulev1beta2.GlobalResourceQuota, namespace string) (err error) {
+func SyncResourceQuota(
+	ctx context.Context,
+	c client.Client,
+	quota *capsulev1beta2.GlobalResourceQuota,
+	namespace string,
+) (err error) {
 	// getting ResourceQuota labels for the mutateFn
 	var quotaLabel, typeLabel string
 
@@ -197,14 +213,12 @@ func (r *Manager) syncResourceQuota(ctx context.Context, quota *capsulev1beta2.G
 	}
 
 	// Verify if quota is present
-	if err := r.Client.Get(ctx, types.NamespacedName{Name: target.Name, Namespace: target.Namespace}, target); err != nil && !apierrors.IsNotFound(err) {
+	if err := c.Get(ctx, types.NamespacedName{Name: target.Name, Namespace: target.Namespace}, target); err != nil && !apierrors.IsNotFound(err) {
 		return err
 	}
 
-	var res controllerutil.OperationResult
-
 	err = retry.RetryOnConflict(retry.DefaultBackoff, func() (retryErr error) {
-		res, retryErr = controllerutil.CreateOrUpdate(ctx, r.Client, target, func() (err error) {
+		_, retryErr = controllerutil.CreateOrUpdate(ctx, c, target, func() (err error) {
 			targetLabels := target.GetLabels()
 			if targetLabels == nil {
 				targetLabels = map[string]string{}
@@ -229,18 +243,12 @@ func (r *Manager) syncResourceQuota(ctx context.Context, quota *capsulev1beta2.G
 			// It may be further reduced by the limits reconciler
 			target.Spec.Hard = space
 
-			r.Log.Info("Resource Quota sync result", "space", space, "name", target.Name, "namespace", target.Namespace)
-
-			return controllerutil.SetControllerReference(quota, target, r.Client.Scheme())
+			return controllerutil.SetControllerReference(quota, target, c.Scheme())
 		})
 
 		return retryErr
 	})
 
-	r.emitEvent(quota, target.GetNamespace(), res, fmt.Sprintf("Ensuring ResourceQuota %s", target.GetName()), err)
-
-	r.Log.Info("Resource Quota sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)
-
 	if err != nil {
 		return
 	}
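Both sync helpers are now exported and take an explicit client.Client instead of the Manager receiver, so they can be driven from other reconcilers or tests. A minimal sketch of a hypothetical caller (the ensureQuotas name and surrounding setup are assumptions, not part of this commit):

// Hypothetical caller: assumes an initialized controller-runtime client and a
// fetched GlobalResourceQuota. Illustrative only.
func ensureQuotas(ctx context.Context, c client.Client, grq *capsulev1beta2.GlobalResourceQuota) error {
	// Resolve the namespaces the quota spans (helper added in
	// controllers/globalquota/utils.go below).
	namespaces, err := globalquota.GetMatchingGlobalQuotaNamespacesByName(ctx, c, grq)
	if err != nil {
		return err
	}

	// SyncResourceQuotas fans out one SyncResourceQuota goroutine per
	// namespace via errgroup, as implemented above.
	return globalquota.SyncResourceQuotas(ctx, c, grq, namespaces)
}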
68 changes: 68 additions & 0 deletions controllers/globalquota/utils.go
@@ -1,16 +1,84 @@
package globalquota

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
	"github.com/projectcapsule/capsule/pkg/api"
	capsuleutils "github.com/projectcapsule/capsule/pkg/utils"
)

// Get all matching namespaces (just the names)
func GetMatchingGlobalQuotaNamespacesByName(
	ctx context.Context,
	c client.Client,
	quota *capsulev1beta2.GlobalResourceQuota,
) (nsNames []string, err error) {
	namespaces, err := GetMatchingGlobalQuotaNamespaces(ctx, c, quota)
	if err != nil {
		return
	}

	nsNames = make([]string, 0, len(namespaces))
	for _, ns := range namespaces {
		nsNames = append(nsNames, ns.Name)
	}

	return
}

// Get all matching namespaces
func GetMatchingGlobalQuotaNamespaces(
	ctx context.Context,
	c client.Client,
	quota *capsulev1beta2.GlobalResourceQuota,
) (namespaces []corev1.Namespace, err error) {
	// Collect matching namespaces
	namespaces = make([]corev1.Namespace, 0)
	seenNamespaces := make(map[string]struct{})

	// Get the tenant type label (used to recognize tenant namespaces)
	objectLabel, err := capsuleutils.GetTypeLabel(&capsulev1beta2.Tenant{})
	if err != nil {
		return
	}

	for _, selector := range quota.Spec.Selectors {
		selected, err := selector.GetMatchingNamespaces(ctx, c)
		if err != nil {
			continue
		}

		for _, ns := range selected {
			// Skip if namespace is being deleted
			if !ns.ObjectMeta.DeletionTimestamp.IsZero() {
				continue
			}

			if _, exists := seenNamespaces[ns.Name]; exists {
				continue // Skip duplicates
			}

			if selector.MustTenantNamespace {
				if _, ok := ns.Labels[objectLabel]; !ok {
					continue
				}
			}

			seenNamespaces[ns.Name] = struct{}{}
			namespaces = append(namespaces, ns)
		}
	}

	return
}

// Returns, for an item, its name as a Kubernetes object name
func ItemObjectName(itemName api.Name, quota *capsulev1beta2.GlobalResourceQuota) string {
	// Generate a name using the tenant name and item name
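Taken together: selectors are OR-ed, results are de-duplicated, terminating namespaces are skipped, and MustTenantNamespace additionally requires the Capsule tenant type label. A hedged usage sketch (assumes ctx, a controller-runtime client c, and a fetched GlobalResourceQuota grq are already in scope):

// Illustrative only: log the namespaces a GlobalResourceQuota currently spans.
namespaces, err := GetMatchingGlobalQuotaNamespaces(ctx, c, grq)
if err != nil {
	return err
}

for _, ns := range namespaces {
	fmt.Println("quota", grq.Name, "spans namespace", ns.Name)
}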
40 changes: 40 additions & 0 deletions e2e/globalresourcequota_test.go
@@ -19,6 +19,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/selection"
+	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/utils/ptr"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
@@ -116,6 +117,11 @@ var _ = Describe("Global ResourceQuotas", func() {
 					corev1.ResourcePods: resource.MustParse("5"),
 				},
 			},
+			"connectivity": {
+				Hard: corev1.ResourceList{
+					corev1.ResourceServices: resource.MustParse("2"),
+				},
+			},
 		},
 	},
 }
@@ -167,6 +173,40 @@
 		}
 	})
 
+	By("Scheduling services simultaneously in all namespaces", func() {
+		wg := sync.WaitGroup{} // Use WaitGroup for concurrency
+		for _, ns := range solarNs {
+			wg.Add(1)
+			go func(namespace string) { // Run in parallel
+				defer wg.Done()
+				service := &corev1.Service{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "test-service",
+						Namespace: namespace,
+						Labels: map[string]string{
+							"test-label": "to-delete",
+						},
+					},
+					Spec: corev1.ServiceSpec{
+						// Select pods with this label (ensure these pods exist in the namespace)
+						Selector: map[string]string{"app": "test"},
+						Ports: []corev1.ServicePort{
+							{
+								Port:       80,
+								TargetPort: intstr.FromInt(8080),
+								Protocol:   corev1.ProtocolTCP,
+							},
+						},
+						Type: corev1.ServiceTypeClusterIP,
+					},
+				}
+				err := k8sClient.Create(context.TODO(), service)
+				Expect(err).Should(Succeed(), "Failed to create Service in namespace %s", namespace)
+			}(ns)
+		}
+		wg.Wait() // Ensure all services are scheduled at the same time
+	})

By("Scheduling deployments simultaneously in all namespaces", func() {
wg := sync.WaitGroup{} // Use WaitGroup for concurrency
for _, ns := range solarNs {
3 changes: 1 addition & 2 deletions main.go
@@ -233,8 +233,7 @@ func main() {
 		route.Cordoning(tenant.CordoningHandler(cfg), tenant.ResourceCounterHandler(manager.GetClient())),
 		route.Node(utils.InCapsuleGroups(cfg, node.UserMetadataHandler(cfg, kubeVersion))),
 		route.Defaults(defaults.Handler(cfg, kubeVersion)),
-		route.QuotaMutation(globalquotahook.StatusHandler(ctrl.Log.WithName("controllers").WithName("Webhook"))),
-		route.QuotaValidation(utils.InCapsuleGroups(cfg, globalquotahook.ValidationHandler()), globalquotahook.DeletionHandler(ctrl.Log.WithName("controllers").WithName("Webhook"))),
+		route.QuotaValidation(globalquotahook.StatusHandler(ctrl.Log.WithName("controllers").WithName("Webhook")), utils.InCapsuleGroups(cfg, globalquotahook.ValidationHandler()), globalquotahook.DeletionHandler(ctrl.Log.WithName("controllers").WithName("Webhook"))),
 	)
 
 	nodeWebhookSupported, _ := utils.NodeWebhookSupported(kubeVersion)
pkg/webhook/globalquota/calculation.go
@@ -49,6 +49,10 @@ func (h *statusHandler) OnUpdate(c client.Client, decoder admission.Decoder, rec
 }
 
 func (h *statusHandler) calculate(ctx context.Context, c client.Client, decoder admission.Decoder, recorder record.EventRecorder, req admission.Request) *admission.Response {
+	h.log.V(3).Info("logging request", "REQUEST", req)
+
+	return utils.ErroredResponse(fmt.Errorf("meowie"))
+
Check failure on line 54 in pkg/webhook/globalquota/calculation.go (GitHub Actions / lint): unreachable-code: unreachable code after this statement (revive)

 	// Decode the incoming object
 	quota := &corev1.ResourceQuota{}
 	if err := decoder.Decode(req, quota); err != nil {
@@ -61,7 +65,7 @@ func (h *statusHandler) calculate(ctx context.Context, c client.Client, decoder
 		return utils.ErroredResponse(fmt.Errorf("failed to decode old ResourceQuota object: %w", err))
 	}
 
-	h.log.V(5).Info("logging request", "REQUEST", req)
+	h.log.V(3).Info("logging request", "REQUEST", req)
 
 	// Get Item within Resource Quota
 	indexLabel := capsuleutils.GetGlobalResourceQuotaTypeLabel()
@@ -83,7 +87,7 @@

 	// Skip if quota not active
 	if !globalQuota.Spec.Active {
-		h.log.V(5).Info("GlobalQuota is not active", "quota", globalQuota.Name)
+		h.log.V(3).Info("GlobalQuota is not active", "quota", globalQuota.Name)
 
 		return nil
 	}
@@ -117,12 +121,20 @@ func (h *statusHandler) calculate(ctx context.Context, c client.Client, decoder
 		tenantUsed = corev1.ResourceList{}
 	}
 
-	h.log.V(5).Info("Available space calculated", "space", availableSpace)
+	h.log.V(3).Info("Available space calculated", "space", availableSpace)
 
 	// Process each resource and enforce allocation limits
 	for resourceName, avail := range availableSpace {
 		rlog := h.log.WithValues("resource", resourceName)
 
+		rlog.V(3).Info("AVAILABLE", "avail", avail, "USED", tenantUsed[resourceName], "HARD", tenantQuota.Hard[resourceName])
+
+		if avail.Cmp(zero) == 0 {
+			rlog.V(3).Info("NO SPACE AVAILABLE")
+			quota.Status.Hard[resourceName] = oldQuota.Status.Hard[resourceName]
+			continue
+		}
+
 		// Get from the status what's currently used
 		var globalUsage resource.Quantity
 		if currentUsed, exists := tenantUsed[resourceName]; exists {
@@ -148,7 +160,7 @@ func (h *statusHandler) calculate(ctx context.Context, c client.Client, decoder
 		diff := newRequested.DeepCopy()
 		diff.Sub(oldAllocated)
 
-		rlog.V(5).Info("calculate ingestion", "diff", diff, "old", oldAllocated, "new", newRequested)
+		rlog.V(3).Info("calculate ingestion", "diff", diff, "old", oldAllocated, "new", newRequested)
 
 		// Compare the newly ingested resources against an empty quantity;
 		// this is the quickest way to find out how the status must be updated
@@ -160,7 +172,7 @@ func (h *statusHandler) calculate(ctx context.Context, c client.Client, decoder
 			continue
 		// Resource consumption increased
 		case stat > 0:
-			rlog.V(5).Info("increase")
+			rlog.V(3).Info("increase")
 			// Validate space
 			// Overprovisioned, allocate what's left
 			if avail.Cmp(diff) < 0 {
@@ -173,7 +185,7 @@

 				//oldAllocated.Add(avail)
 				rlog.V(5).Info("PREVENT OVERPROVISIONING", "allocation", oldAllocated)
-				quota.Status.Hard[resourceName] = oldAllocated
+				quota.Status.Hard[resourceName] = oldQuota.Status.Hard[resourceName]
 
 			} else {
 				// Adding, since requested resources have space
@@ -185,7 +197,7 @@
 			}
 		// Resource consumption decreased
 		default:
-			rlog.V(5).Info("negate")
+			rlog.V(3).Info("negate")
 			// Subtract the difference from available;
 			// negative values also combine correctly with the Add() operation
 			globalUsage.Add(diff)
@@ -197,9 +209,7 @@
 			}
 		}
 
-		rlog.V(5).Info("calculate ingestion", "diff", diff, "usage", avail, "usage", globalUsage)
-
-		rlog.V(5).Info("caclulated total usage", "global", globalUsage, "requested", quota.Status.Used[resourceName])
+		rlog.V(3).Info("calculated total usage", "global", globalUsage, "diff", diff, "usage", avail, "hard", quota.Status.Hard[resourceName], "usage", quota.Status.Used[resourceName])
 		tenantUsed[resourceName] = globalUsage
 	}

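The branchy middle of the handler reduces to Quantity arithmetic: diff = newRequested - oldAllocated tells whether a namespace asks for more, the same, or less, and avail caps any increase. A self-contained sketch of that decision logic (the values are invented; the semantics mirror the switch above):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	avail := resource.MustParse("2")        // space left on the global quota item
	oldAllocated := resource.MustParse("3") // hard limit previously granted to this namespace
	newRequested := resource.MustParse("4") // hard limit now being requested

	diff := newRequested.DeepCopy()
	diff.Sub(oldAllocated) // here +1: consumption increased

	zero := resource.MustParse("0")

	switch stat := diff.Cmp(zero); {
	case stat == 0:
		fmt.Println("no change: keep the previous allocation")
	case stat > 0 && avail.Cmp(diff) < 0:
		// Overprovisioned: keep the old hard limit instead of granting more.
		fmt.Println("prevent overprovisioning, keep:", oldAllocated.String())
	case stat > 0:
		// Enough space: grant the increase and book it as global usage.
		fmt.Println("grant increase, add to global usage:", diff.String())
	default:
		// Consumption decreased: the negative diff combines with Add() to
		// return the freed amount to the pool.
		fmt.Println("return to pool:", diff.String())
	}
}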
File renamed without changes.
26 changes: 26 additions & 0 deletions tnt.yaml
@@ -0,0 +1,26 @@
apiVersion: capsule.clastix.io/v1beta2
kind: Tenant
metadata:
  creationTimestamp: "2025-02-18T17:38:52Z"
  generation: 1
  labels:
    customer-resource-pool: dev
    kubernetes.io/metadata.name: solar-quota
  name: solar-quota
  resourceVersion: "28140"
  uid: 81c4ca40-550c-4dca-97f7-6f0ca98ad88a
spec:
  cordoned: false
  ingressOptions:
    hostnameCollisionScope: Disabled
  limitRanges: {}
  networkPolicies: {}
  owners:
  - clusterRoles:
    - admin
    - capsule-namespace-deleter
    kind: User
    name: solar-user
  preventDeletion: false
  resourceQuotas:
    scope: Tenant
7 changes: 7 additions & 0 deletions zero-quota.yaml
@@ -0,0 +1,7 @@
apiVersion: v1
kind: ResourceQuota
metadata:
  name: compute-resources
spec:
  hard:
     pods: "0"

Check failure on line 7 in zero-quota.yaml (GitHub Actions / yamllint): 7:6 [indentation] wrong indentation: expected 4 but found 5
