Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions pkg/errors/errors.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@ const (
// Conflict errors (CNF) - 409
CodeConflictExists = "HYPERFLEET-CNF-001"
CodeConflictVersion = "HYPERFLEET-CNF-002"
CodeConflictState = "HYPERFLEET-CNF-003"

// Rate Limit errors (LMT) - 429
CodeRateLimitExceeded = "HYPERFLEET-LMT-001"
Expand Down Expand Up @@ -166,6 +167,10 @@ var errorDefinitions = map[string]errorDefinition{
CodeConflictVersion: {
ErrorTypeConflict, "Version Conflict", "The resource version does not match", http.StatusConflict,
},
CodeConflictState: {
ErrorTypeConflict, "State Conflict",
"Operation not allowed in current resource state", http.StatusConflict,
},

// Rate Limit errors (LMT) - 429
CodeRateLimitExceeded: {
Expand Down Expand Up @@ -363,6 +368,10 @@ func Conflict(reason string, values ...interface{}) *ServiceError {
return New(CodeConflictExists, reason, values...)
}

// ConflictState returns a ServiceError carrying CodeConflictState (HTTP 409),
// for operations that are not permitted in the resource's current state.
// The reason string and values are forwarded to New unchanged.
func ConflictState(reason string, values ...interface{}) *ServiceError {
	serr := New(CodeConflictState, reason, values...)
	return serr
}

// Validation returns a ServiceError carrying CodeValidationMultiple,
// used to report request-validation failures. The reason string and
// values are forwarded to New unchanged.
func Validation(reason string, values ...interface{}) *ServiceError {
	serr := New(CodeValidationMultiple, reason, values...)
	return serr
}
Expand Down
4 changes: 4 additions & 0 deletions pkg/handlers/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,10 @@ func (h ClusterHandler) Patch(w http.ResponseWriter, r *http.Request) {
return nil, err
}

if found.DeletedTime != nil {
return nil, errors.ConflictState("Cluster '%s' is marked for deletion", id)
}

if patch.Spec != nil {
specJSON, err := json.Marshal(*patch.Spec)
if err != nil {
Expand Down
14 changes: 13 additions & 1 deletion pkg/handlers/cluster_nodepools.go
Original file line number Diff line number Diff line change
Expand Up @@ -190,11 +190,15 @@ func (h ClusterNodePoolsHandler) Patch(w http.ResponseWriter, r *http.Request) {
clusterID := mux.Vars(r)["id"]
nodePoolID := mux.Vars(r)["nodepool_id"]

_, err := h.clusterService.Get(ctx, clusterID)
cluster, err := h.clusterService.Get(ctx, clusterID)
if err != nil {
return nil, err
}

if cluster.DeletedTime != nil {
return nil, errors.ConflictState("Cluster '%s' is marked for deletion", clusterID)
}
Comment on lines +193 to +200
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# Description: Inspect NodePool/Cluster service implementations for atomic soft-delete enforcement on write paths.

rg -nP --type go 'func \(.*\) (Create|Replace)\(' -C 4
rg -nP --type go 'DeletedTime|ConflictState|transaction|tx\b|FOR UPDATE|optimistic' -C 4

Repository: openshift-hyperfleet/hyperfleet-api

Length of output: 50392


🏁 Script executed:

# Find Create/Replace implementations in service and DAO layers with DeletedTime checks
rg -n 'func.*Create\(|func.*Replace\(' pkg/services pkg/dao -A 20 | head -100

Repository: openshift-hyperfleet/hyperfleet-api

Length of output: 6057


🏁 Script executed:

# Search for DeletedTime checks in Create/Replace context
rg -B 5 -A 15 'DeletedTime' pkg/services pkg/dao | grep -E '(func|Create|Replace|DeletedTime)' | head -50

Repository: openshift-hyperfleet/hyperfleet-api

Length of output: 3722


🏁 Script executed:

# Look at the actual handler code around line 193-200 and 269-271
sed -n '190,210p' pkg/handlers/cluster_nodepools.go
sed -n '265,275p' pkg/handlers/cluster_nodepools.go

Repository: openshift-hyperfleet/hyperfleet-api

Length of output: 996


🏁 Script executed:

# Check if the write operations (Create/Replace) in services revalidate state
rg -A 30 'func \(s \*sqlClusterService\) Create\(' pkg/services/cluster.go
rg -A 30 'func \(s \*sqlNodePoolService\) Create\(' pkg/services/node_pool.go

Repository: openshift-hyperfleet/hyperfleet-api

Length of output: 1760


The soft-delete check must be re-validated atomically in the write path.

The handler's Get and subsequent Create/Replace are separate operations, allowing a cluster to be marked for deletion between the check (line 195–197) and the write. The service and DAO implementations skip re-validation—Create() and Replace() proceed without re-checking DeletedTime.

Move the validation into the service or DAO layer and enforce it transactionally with the write, or add an explicit precondition check (e.g., WHERE cluster.deleted_time IS NULL) in the database write to prevent orphaned nodepools.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@pkg/handlers/cluster_nodepools.go` around lines 193 - 200, The soft-delete
check on cluster.DeletedTime in the handler (after clusterService.Get) is racy;
move this validation into the service/DAO layer and enforce it atomically with
the write. Update the nodepool creation/replacement paths (the service methods
that call Create()/Replace() on the DAO) to either re-check DeletedTime inside
the same transaction or add a precondition to the DB write (e.g., include WHERE
cluster.deleted_time IS NULL in the INSERT/UPDATE SQL) so Create()/Replace()
fail if the cluster is marked deleted; ensure the service/DAO returns a clear
ConflictState error when that condition is hit.


found, err := h.nodePoolService.Get(ctx, nodePoolID)
if err != nil {
return nil, err
Expand All @@ -204,6 +208,10 @@ func (h ClusterNodePoolsHandler) Patch(w http.ResponseWriter, r *http.Request) {
return nil, errors.NotFound("NodePool '%s' not found for cluster '%s'", nodePoolID, clusterID)
}

if found.DeletedTime != nil {
return nil, errors.ConflictState("NodePool '%s' is marked for deletion", nodePoolID)
}

if patch.Spec != nil {
specJSON, jsonErr := json.Marshal(*patch.Spec)
if jsonErr != nil {
Expand Down Expand Up @@ -258,6 +266,10 @@ func (h ClusterNodePoolsHandler) Create(w http.ResponseWriter, r *http.Request)
return nil, err
}

if cluster.DeletedTime != nil {
return nil, errors.ConflictState("Cluster '%s' is marked for deletion", clusterID)
}

// Use the presenters.ConvertNodePool helper to convert the request
nodePoolModel, convErr := presenters.ConvertNodePool(&req, cluster.ID, "system@hyperfleet.local")
if convErr != nil {
Expand Down
Loading