Cannot delete project - name has already been taken

Hello,

I’m running GitLab 16.2.1-ee.0 on Ubuntu 22.04.2. I’m trying to delete my own project, but it keeps failing with this error:

This project was scheduled for deletion, but failed with the following message: Validation failed: Name has already been taken 

Changing the project name hasn’t helped, and the project path cannot be changed either. When I try to change it, I get this error:

Cannot rename project because it contains container registry tags! 

When I try to delete the tags instead, the image repository seems to be permanently stuck at:

This image repository is scheduled for deletion

And that’s about it.
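
For reference, the repositories can also be removed through the Container Registry REST API instead of the UI. A minimal sketch of that approach is below; the host, token and project ID are placeholders, and the delete itself is asynchronous, so it can still end up stuck if the registry rejects GitLab’s calls.

# Minimal sketch: list the project's container repositories and ask
# GitLab to delete each one via the REST API. Host, token and project
# ID below are placeholders, not values from this post.
import requests

GITLAB = "https://gitlab.example.com"
HEADERS = {"PRIVATE-TOKEN": "<access_token_with_api_scope>"}
PROJECT_ID = 123  # numeric project ID

repos = requests.get(
    f"{GITLAB}/api/v4/projects/{PROJECT_ID}/registry/repositories",
    headers=HEADERS,
).json()

for repo in repos:
    print(repo["id"], repo["path"])
    # Deletion is scheduled asynchronously; a 202 only means "accepted".
    r = requests.delete(
        f"{GITLAB}/api/v4/projects/{PROJECT_ID}/registry/repositories/{repo['id']}",
        headers=HEADERS,
    )
    print(r.status_code)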

Sidekiq is being flooded with container_repository_delete jobs, such as this one:

{
  "severity": "INFO",
  "time": "2023-07-27T15:28:43.549Z",
  "retry": 0,
  "queue": "default",
  "version": 0,
  "status_expiration": 1800,
  "queue_namespace": "container_repository_delete",
  "args": [],
  "class": "ContainerRegistry::DeleteContainerRepositoryWorker",
  "jid": "f12b43ff7da5736ed539fdf5",
  "created_at": "2023-07-27T15:28:43.449Z",
  "meta.caller_id": "ContainerRegistry::DeleteContainerRepositoryWorker",
  "correlation_id": "89b35f44ccaa9b6758bd18c8526ec0a5",
  "meta.root_caller_id": "Cronjob",
  "meta.feature_category": "container_registry",
  "meta.client_id": "ip/",
  "worker_data_consistency": "always",
  "size_limiter": "validated",
  "enqueued_at": "2023-07-27T15:28:43.450Z",
  "job_size_bytes": 2,
  "pid": 1109022,
  "message": "ContainerRegistry::DeleteContainerRepositoryWorker JID-f12b43ff7da5736ed539fdf5: done: 0.093509 sec",
  "job_status": "done",
  "scheduling_latency_s": 0.00532,
  "redis_calls": 8,
  "redis_duration_s": 0.002258,
  "redis_read_bytes": 9,
  "redis_write_bytes": 1302,
  "redis_queues_calls": 4,
  "redis_queues_duration_s": 0.001368,
  "redis_queues_read_bytes": 5,
  "redis_queues_write_bytes": 763,
  "redis_shared_state_calls": 4,
  "redis_shared_state_duration_s": 0.00089,
  "redis_shared_state_read_bytes": 4,
  "redis_shared_state_write_bytes": 539,
  "db_count": 8,
  "db_write_count": 3,
  "db_cached_count": 1,
  "db_replica_count": 0,
  "db_primary_count": 8,
  "db_main_count": 8,
  "db_ci_count": 0,
  "db_main_replica_count": 0,
  "db_ci_replica_count": 0,
  "db_replica_cached_count": 0,
  "db_primary_cached_count": 1,
  "db_main_cached_count": 1,
  "db_ci_cached_count": 0,
  "db_main_replica_cached_count": 0,
  "db_ci_replica_cached_count": 0,
  "db_replica_wal_count": 0,
  "db_primary_wal_count": 0,
  "db_main_wal_count": 0,
  "db_ci_wal_count": 0,
  "db_main_replica_wal_count": 0,
  "db_ci_replica_wal_count": 0,
  "db_replica_wal_cached_count": 0,
  "db_primary_wal_cached_count": 0,
  "db_main_wal_cached_count": 0,
  "db_ci_wal_cached_count": 0,
  "db_main_replica_wal_cached_count": 0,
  "db_ci_replica_wal_cached_count": 0,
  "db_replica_duration_s": 0,
  "db_primary_duration_s": 0.005,
  "db_main_duration_s": 0.005,
  "db_ci_duration_s": 0,
  "db_main_replica_duration_s": 0,
  "db_ci_replica_duration_s": 0,
  "external_http_count": 6,
  "external_http_duration_s": 0.03383826804929413,
  "cpu_s": 0.050039,
  "mem_objects": 13950,
  "mem_bytes": 1144520,
  "mem_mallocs": 3822,
  "mem_total_bytes": 1702520,
  "worker_id": "sidekiq_0",
  "rate_limiting_gates": [],
  "duration_s": 0.093509,
  "completed_at": "2023-07-27T15:28:43.549Z",
  "load_balancing_strategy": "primary",
  "db_duration_s": 0.00642,
  "urgency": "low",
  "target_duration_s": 300
}

(several per second).
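
For what it’s worth, one way to get an overview of those entries is to summarise them straight from the Sidekiq log. A small sketch, assuming the default Omnibus log location:

# Sketch: count DeleteContainerRepositoryWorker entries by job status
# to see whether the jobs ever report anything other than "done".
# /var/log/gitlab/sidekiq/current is the default Omnibus log path.
import json
from collections import Counter

statuses = Counter()
with open("/var/log/gitlab/sidekiq/current") as log:
    for line in log:
        try:
            entry = json.loads(line)
        except ValueError:
            continue
        if entry.get("class") == "ContainerRegistry::DeleteContainerRepositoryWorker":
            statuses[entry.get("job_status", "unknown")] += 1

print(statuses)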

Any ideas how I could start debugging this?

OK, I’ve just realised what the problem was. Someone had installed the container registry as a separate Docker container for some reason. I’m not sure exactly how that setup was meant to work, but apparently GitLab didn’t have the right permissions to make changes to that registry, or in any case the delete requests stayed pending indefinitely. That’s why the Sidekiq log was full of those jobs.

Switching to the built-in GitLab registry, started from gitlab-ctl, makes everything work as expected.
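
For anyone who runs into something similar with an externally hosted registry, a quick sanity check is whether the registry answers the Docker Registry v2 API at all; with token auth configured, a 401 with a WWW-Authenticate challenge pointing at GitLab’s /jwt/auth endpoint is the expected answer. A minimal sketch (the hostname is a placeholder):

# Sketch: verify the registry responds to the Docker Registry v2 API.
# registry.example.com is a placeholder for your registry host.
import requests

r = requests.get("https://registry.example.com/v2/")
print(r.status_code)                      # 401 is normal when auth is required
print(r.headers.get("WWW-Authenticate"))  # should reference GitLab's token endpoint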