Commit 6f39763

wip
1 parent 01e4c8a commit 6f39763

34 files changed: +393, -5898 lines changed
.github/scripts/end2end/dump-setup-state.sh (new file)

Lines changed: 298 additions & 0 deletions
#!/bin/bash
set -eu
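# Usage: dump-setup-state.sh [NAMESPACE] [ZENKO_NAME]
# Everything is printed to stdout; callers typically redirect the whole dump, e.g.:
#   bash dump-setup-state.sh default end2end > /tmp/v2-primary-dump.txt 2>&1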
NAMESPACE="${1:-default}"
ZENKO_NAME="${2:-end2end}"

echo "=================================================="
echo "ZENKO SETUP STATE DUMP"
echo "Namespace: ${NAMESPACE}"
echo "Zenko Name: ${ZENKO_NAME}"
echo "Timestamp: $(date -u +"%Y-%m-%dT%H:%M:%SZ")"
echo "=================================================="
echo ""

echo "=== QUICK SUMMARY ==="
INSTANCE_ID=$(kubectl get zenko "${ZENKO_NAME}" -n "${NAMESPACE}" -o jsonpath='{.status.instanceID}' 2>/dev/null || echo "NONE")
ZENKO_AVAILABLE=$(kubectl get zenko "${ZENKO_NAME}" -n "${NAMESPACE}" -o jsonpath='{.status.conditions[?(@.type=="Available")].status}' 2>/dev/null || echo "UNKNOWN")
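# The config secrets hold whole JSON documents, base64-encoded: jq extracts the field,
# sed strips the surrounding JSON quotes, and base64 -d recovers the document.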
NUM_LOCATIONS=$(kubectl get secret -l "app.kubernetes.io/name=connector-cloudserver-config,app.kubernetes.io/instance=${ZENKO_NAME}" -n "${NAMESPACE}" -o json 2>/dev/null | \
  jq '.items[0].data."locationConfig.json"' | sed 's/"//g' | base64 -d 2>/dev/null | jq '. | length' || echo "0")
NUM_SORBET_FWDS=$(kubectl get deployment -n "${NAMESPACE}" -l "app.kubernetes.io/instance=${ZENKO_NAME}" -o json 2>/dev/null | \
  jq '[.items[] | select(.metadata.labels."sorbetd-location" != null)] | length' || echo "0")
NUM_ACCOUNTS=$(kubectl get secrets -n "${NAMESPACE}" -l "type=end2end" -o json 2>/dev/null | jq '.items | length' || echo "0")

echo "Instance ID: ${INSTANCE_ID}"
echo "Zenko Available: ${ZENKO_AVAILABLE}"
echo "Locations configured: ${NUM_LOCATIONS}"
echo "Sorbet forwarders: ${NUM_SORBET_FWDS}"
echo "Test accounts: ${NUM_ACCOUNTS}"
echo ""

echo "=== ZENKO CUSTOM RESOURCE ==="
kubectl get zenko "${ZENKO_NAME}" -n "${NAMESPACE}" -o json | jq '.' || echo "ERROR: Failed to get Zenko CR"
echo ""

echo "=== ZENKO STATUS ==="
kubectl get zenko "${ZENKO_NAME}" -n "${NAMESPACE}" -o json | jq '.status' || echo "ERROR: Failed to get status"
echo ""

echo "=== ZENKO ANNOTATIONS ==="
kubectl get zenko "${ZENKO_NAME}" -n "${NAMESPACE}" -o json | jq '.metadata.annotations' || echo "No annotations"
echo ""

echo "=== INSTANCE ID ==="
echo "${INSTANCE_ID}"
echo ""

echo "=== SORBET CONFIGURATION (from Zenko CR) ==="
kubectl get zenko "${ZENKO_NAME}" -n "${NAMESPACE}" -o json | jq '.spec.sorbet' || echo "No sorbet config"
echo ""

echo "=== CLOUDSERVER CONFIG SECRET ==="
kubectl get secret -l "app.kubernetes.io/name=connector-cloudserver-config,app.kubernetes.io/instance=${ZENKO_NAME}" -n "${NAMESPACE}" -o json | \
  jq '.items[0].data."config.json"' | sed 's/"//g' | base64 -d | jq '.' || echo "ERROR: Failed to get cloudserver config"
echo ""

echo "=== LOCATION CONFIG (from cloudserver config) ==="
kubectl get secret -l "app.kubernetes.io/name=connector-cloudserver-config,app.kubernetes.io/instance=${ZENKO_NAME}" -n "${NAMESPACE}" -o json | \
  jq '.items[0].data."locationConfig.json"' | sed 's/"//g' | base64 -d | jq '.' || echo "ERROR: Failed to get location config"
echo ""

echo "=== BACKBEAT CONFIG ==="
kubectl get secret -l "app.kubernetes.io/name=backbeat-config,app.kubernetes.io/instance=${ZENKO_NAME}" -n "${NAMESPACE}" -o json | \
  jq '.items[0].data."config.json"' | sed 's/"//g' | base64 -d | jq '.' || echo "ERROR: Failed to get backbeat config"
echo ""

echo "=== SORBET CONFIGS (cold location configs) ==="
for secret in $(kubectl get secrets -n "${NAMESPACE}" -l "app.kubernetes.io/instance=${ZENKO_NAME}" -o name | grep sorbet-config); do
  echo "--- ${secret} ---"
  kubectl get "${secret}" -n "${NAMESPACE}" -o json | \
    jq '.data."config.json"' | sed 's/"//g' | base64 -d | jq '.' || echo "ERROR: Failed to decode"
  echo ""
done
echo ""

echo "=== SORBET FORWARDER USER CREDENTIALS ==="
kubectl get secret -l "app.kubernetes.io/name=sorbet-fwd-creds,app.kubernetes.io/instance=${ZENKO_NAME}" -n "${NAMESPACE}" -o json | \
  jq '.items[0] | {name: .metadata.name, accessKeyId: (.data.accessKey | @base64d)[0:10] + "..."}' || echo "No sorbet-fwd credentials found"
echo ""

echo "=== SERVICE USER CREDENTIALS ==="
for user_type in backbeat-lcbp-user-creds backbeat-lcc-user-creds backbeat-lcop-user-creds backbeat-qp-user-creds; do
  echo "--- ${user_type} ---"
  kubectl get secret -l "app.kubernetes.io/name=${user_type},app.kubernetes.io/instance=${ZENKO_NAME}" -n "${NAMESPACE}" -o json | \
    jq '.items[0].metadata.name' 2>/dev/null || echo "Not found"
done
echo ""

echo "=== ACCOUNT SECRETS ==="
kubectl get secrets -n "${NAMESPACE}" -l "type=end2end" -o json | \
  jq '.items[] | {name: .metadata.name, accountId: (.data.AccountId | @base64d), accessKeyId: (.data.AccessKeyId | @base64d)[0:10] + "..."}' || echo "No account secrets found"
echo ""
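# Kafka topics are listed from a short-lived pod (kubectl run --rm) using the kafka
# image pinned in solution/deps.yaml, so no Kafka CLI is needed on the runner; the
# topic list is then filtered on the Zenko instance ID.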
echo "=== KAFKA TOPICS ==="
94+
INSTANCE_ID=$(kubectl get zenko "${ZENKO_NAME}" -n "${NAMESPACE}" -o jsonpath='{.status.instanceID}')
95+
if [ -n "${INSTANCE_ID}" ]; then
96+
echo "Instance ID: ${INSTANCE_ID}"
97+
98+
KAFKA_IMAGE=$(yq eval ".kafka.sourceRegistry" ../../../solution/deps.yaml)/$(yq eval ".kafka.image" ../../../solution/deps.yaml):$(yq eval ".kafka.tag" ../../../solution/deps.yaml)
99+
KAFKA_HOST_PORT=$(kubectl get secret -l app.kubernetes.io/name=backbeat-config,app.kubernetes.io/instance="${ZENKO_NAME}" -n "${NAMESPACE}" \
100+
-o jsonpath='{.items[0].data.config\.json}' | base64 -d | jq -r '.kafka.hosts')
101+
102+
echo "Listing Kafka topics (grep for ${INSTANCE_ID}):"
103+
kubectl run kafka-list-topics-dump \
104+
--image="${KAFKA_IMAGE}" \
105+
--pod-running-timeout=2m \
106+
--rm \
107+
--restart=Never \
108+
--attach=True \
109+
--namespace="${NAMESPACE}" \
110+
--command -- bash -c "kafka-topics.sh --list --bootstrap-server ${KAFKA_HOST_PORT}" 2>/dev/null | grep "${INSTANCE_ID}" || echo "No topics found for instance"
111+
fi
112+
echo ""
113+
114+
echo "=== ZENKO NOTIFICATION TARGETS ==="
115+
kubectl get zenkonotificationtargets -n "${NAMESPACE}" -o json | jq '.items[] | {name: .metadata.name, spec: .spec}' || echo "No notification targets"
116+
echo ""
117+
118+
echo "=== DEPLOYMENTS STATUS ==="
119+
echo "--- Cloudserver ---"
120+
kubectl get deployment -l "app.kubernetes.io/name=connector-cloudserver,app.kubernetes.io/instance=${ZENKO_NAME}" -n "${NAMESPACE}" -o json | \
121+
jq '.items[0] | {name: .metadata.name, replicas: .status.replicas, ready: .status.readyReplicas, generation: .status.observedGeneration, annotations: .metadata.annotations}' || echo "Not found"
122+
123+
echo "--- Internal Cloudserver ---"
124+
kubectl get deployment -l "app.kubernetes.io/name=internal-s3api,app.kubernetes.io/instance=${ZENKO_NAME}" -n "${NAMESPACE}" -o json | \
125+
jq '.items[0] | {name: .metadata.name, replicas: .status.replicas, ready: .status.readyReplicas}' || echo "Not found"
126+
127+
echo "--- Backbeat Lifecycle Transition Processor ---"
128+
kubectl get deployment -l "app.kubernetes.io/name=backbeat-lifecycle-transition-processor,app.kubernetes.io/instance=${ZENKO_NAME}" -n "${NAMESPACE}" -o json | \
129+
jq '.items[0] | {name: .metadata.name, replicas: .status.replicas, ready: .status.readyReplicas}' || echo "Not found"
130+
131+
echo "--- Backbeat Lifecycle Conductor ---"
132+
kubectl get deployment -l "app.kubernetes.io/name=backbeat-lifecycle-conductor,app.kubernetes.io/instance=${ZENKO_NAME}" -n "${NAMESPACE}" -o json | \
133+
jq '.items[0] | {name: .metadata.name, replicas: .status.replicas, ready: .status.readyReplicas}' || echo "Not found"
134+
135+
echo "--- Backbeat Lifecycle Bucket Processor ---"
136+
kubectl get deployment -l "app.kubernetes.io/name=backbeat-lifecycle-bucket-processor,app.kubernetes.io/instance=${ZENKO_NAME}" -n "${NAMESPACE}" -o json | \
137+
jq '.items[0] | {name: .metadata.name, replicas: .status.replicas, ready: .status.readyReplicas}' || echo "Not found"
138+
139+
echo "--- ALL Sorbet Forwarder Deployments ---"
140+
kubectl get deployment -n "${NAMESPACE}" -l "app.kubernetes.io/instance=${ZENKO_NAME}" -o json | \
141+
jq '.items[] | select(.metadata.labels."sorbetd-location" != null) | {name: .metadata.name, location: .metadata.labels."sorbetd-location", replicas: .status.replicas, ready: .status.readyReplicas}' || echo "No sorbet forwarders found"
142+
143+
echo "--- Mock Sorbet ---"
144+
kubectl get deployment mock-sorbet -n "${NAMESPACE}" -o json | \
145+
jq '{name: .metadata.name, replicas: .status.replicas, ready: .status.readyReplicas}' 2>/dev/null || echo "Not found"
146+
147+
echo "--- Mock Miria ---"
148+
kubectl get deployment mock-miria -n "${NAMESPACE}" -o json | \
149+
jq '{name: .metadata.name, replicas: .status.replicas, ready: .status.readyReplicas}' 2>/dev/null || echo "Not found"
150+
echo ""
151+
152+
echo "=== PODS STATUS ==="
153+
echo "All pods (sorbet and backbeat lifecycle):"
154+
kubectl get pods -n "${NAMESPACE}" --no-headers | grep -E "(sorbet|backbeat-lifecycle)" || echo "No sorbet/backbeat pods found"
155+
echo ""
156+
echo "Failing/Crashing pods in namespace:"
157+
kubectl get pods -n "${NAMESPACE}" --field-selector=status.phase!=Running,status.phase!=Succeeded -o json | \
158+
jq '.items[] | {name: .metadata.name, phase: .status.phase, reason: .status.reason, containers: [.status.containerStatuses[]? | {name: .name, ready: .ready, restartCount: .restartCount, state: .state}]}' || echo "No failing pods"
159+
echo ""
160+
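# Exec into the mock-sorbet (DMF) pod to list and count the files under /data
# (assumed to be where the mock stores archived cold objects).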
echo "=== DMF VOLUME CHECK (via mock-sorbet pod) ==="
if kubectl get deployment mock-sorbet -n "${NAMESPACE}" >/dev/null 2>&1; then
  SORBET_POD=$(kubectl get pods -n "${NAMESPACE}" -l "sorbetd-name=mock-sorbet" -o jsonpath='{.items[0].metadata.name}')
  if [ -n "${SORBET_POD}" ]; then
    echo "Sorbet pod: ${SORBET_POD}"
    echo "Files in /data:"
    kubectl exec -n "${NAMESPACE}" "${SORBET_POD}" -- find /data -type f 2>/dev/null | head -20 || echo "ERROR: Cannot access sorbet pod"
    echo "File count in /data:"
    kubectl exec -n "${NAMESPACE}" "${SORBET_POD}" -- sh -c 'find /data -type f | wc -l' 2>/dev/null || echo "ERROR"
  else
    echo "ERROR: No mock-sorbet pod found"
  fi
else
  echo "mock-sorbet deployment not found"
fi
echo ""
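# Inspect the node-side hostPath backing the cold-storage volume from a throwaway
# alpine pod; --overrides injects the hostPath volume spec, since kubectl run has no
# flag for mounting host directories.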
echo "=== HOST VOLUME CHECK (via hostPath) ==="
179+
echo "Attempting to check /data/sorbet-data-0 via ephemeral pod..."
180+
kubectl run diagnostic-volume-check \
181+
--image=alpine:3.22 \
182+
--restart=Never \
183+
--rm \
184+
--attach=True \
185+
--pod-running-timeout=1m \
186+
--namespace="${NAMESPACE}" \
187+
--overrides='
188+
{
189+
"spec": {
190+
"containers": [{
191+
"name": "diagnostic",
192+
"image": "alpine:3.22",
193+
"command": ["/bin/sh", "-c"],
194+
"args": ["echo Files in /cold-data: && find /cold-data -type f | wc -l && find /cold-data -type f | head -10"],
195+
"volumeMounts": [{
196+
"name": "volume",
197+
"mountPath": "/cold-data"
198+
}]
199+
}],
200+
"volumes": [{
201+
"name": "volume",
202+
"hostPath": {
203+
"path": "/data/sorbet-data-0",
204+
"type": "DirectoryOrCreate"
205+
}
206+
}]
207+
}
208+
}' 2>&1 || echo "Failed to check host volume"
209+
echo ""
210+
211+
echo "=== PVC STATUS ==="
212+
kubectl get pvc -n "${NAMESPACE}" -o json | jq '.items[] | {name: .metadata.name, status: .status.phase, capacity: .status.capacity.storage, storageClass: .spec.storageClassName}' || echo "No PVCs"
213+
echo ""
214+
215+
echo "=== PV BACKING sorbet-data ==="
216+
kubectl get pv -o json | jq '.items[] | select(.spec.claimRef.name == "sorbet-data") | {name: .metadata.name, hostPath: .spec.hostPath, capacity: .spec.capacity.storage, accessModes: .spec.accessModes}' || echo "No PV for sorbet-data"
217+
echo ""
218+
219+
echo "=== MONGO DB CONFIG ==="
220+
kubectl get zenko "${ZENKO_NAME}" -n "${NAMESPACE}" -o json | jq '.spec.mongodb' || echo "No mongodb config"
221+
echo ""
222+
223+
echo "=== SERVICE ENDPOINTS READY CHECK ==="
224+
for svc in "${ZENKO_NAME}-connector-s3api" "${ZENKO_NAME}-connector-vault-auth-api" "mock-sorbet"; do
225+
echo "--- ${svc} ---"
226+
kubectl get endpoints "${svc}" -n "${NAMESPACE}" -o json 2>/dev/null | jq '{addresses: [.subsets[].addresses[].ip]}' || echo "Service not found"
227+
done
228+
echo ""
229+
230+
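# Consumer groups are listed from inside the Kafka broker pod; KAFKA_OPTS is cleared
# so the CLI does not inherit the broker's JVM options (assumption: avoids JMX/agent
# clashes), and the output is filtered on the instance ID.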
echo "=== KAFKA CONSUMER GROUPS ==="
231+
if [ -n "${INSTANCE_ID}" ]; then
232+
KAFKA_POD=$(kubectl get pods -n "${NAMESPACE}" -l "brokerId=0,app=kafka" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
233+
if [ -n "${KAFKA_POD}" ]; then
234+
echo "Kafka pod: ${KAFKA_POD}"
235+
echo "Consumer groups for this instance:"
236+
kubectl exec -n "${NAMESPACE}" "${KAFKA_POD}" -- bash -lc "export KAFKA_OPTS='' && kafka-consumer-groups.sh --bootstrap-server localhost:9092 --list" 2>/dev/null | grep "${INSTANCE_ID}" || echo "No consumer groups found"
237+
else
238+
echo "Kafka pod not found"
239+
fi
240+
fi
241+
echo ""
242+
243+
echo "=== BACKBEAT EXTENSIONS (from config) ==="
244+
kubectl get secret -l "app.kubernetes.io/name=backbeat-config,app.kubernetes.io/instance=${ZENKO_NAME}" -n "${NAMESPACE}" -o json | \
245+
jq '.items[0].data."config.json"' | sed 's/"//g' | base64 -d | jq '.extensions | keys' || echo "ERROR: Failed to get extensions"
246+
echo ""
247+
248+
echo "=== SECRET RESOURCE VERSIONS ==="
249+
echo "connector-cloudserver-config:"
250+
kubectl get secret -l "app.kubernetes.io/name=connector-cloudserver-config,app.kubernetes.io/instance=${ZENKO_NAME}" -n "${NAMESPACE}" -o json | \
251+
jq '.items[0] | {name: .metadata.name, resourceVersion: .metadata.resourceVersion, creationTimestamp: .metadata.creationTimestamp}'
252+
echo "backbeat-config:"
253+
kubectl get secret -l "app.kubernetes.io/name=backbeat-config,app.kubernetes.io/instance=${ZENKO_NAME}" -n "${NAMESPACE}" -o json | \
254+
jq '.items[0] | {name: .metadata.name, resourceVersion: .metadata.resourceVersion, creationTimestamp: .metadata.creationTimestamp}'
255+
echo ""
256+
257+
echo "=== RECENT POD LOGS (last 30 lines each) ==="
258+
echo "--- Sorbet Forwarder for e2e-cold (if exists) ---"
259+
SORBET_FWD_POD=$(kubectl get pods -n "${NAMESPACE}" -l "sorbetd-location=e2e-cold,app.kubernetes.io/instance=${ZENKO_NAME}" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
260+
if [ -n "${SORBET_FWD_POD}" ]; then
261+
kubectl logs -n "${NAMESPACE}" "${SORBET_FWD_POD}" --tail=30 2>/dev/null || echo "Cannot get logs"
262+
else
263+
echo "No sorbet-forwarder pod for e2e-cold"
264+
fi
265+
echo ""
266+
267+
echo "--- Mock Sorbet ---"
268+
MOCK_SORBET_POD=$(kubectl get pods -n "${NAMESPACE}" -l "sorbetd-name=mock-sorbet" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
269+
if [ -n "${MOCK_SORBET_POD}" ]; then
270+
kubectl logs -n "${NAMESPACE}" "${MOCK_SORBET_POD}" --tail=30 2>/dev/null || echo "Cannot get logs"
271+
else
272+
echo "No mock-sorbet pod"
273+
fi
274+
echo ""
275+
276+
echo "--- Backbeat Lifecycle Transition Processor ---"
277+
LTP_POD=$(kubectl get pods -n "${NAMESPACE}" -l "app.kubernetes.io/name=backbeat-lifecycle-transition-processor,app.kubernetes.io/instance=${ZENKO_NAME}" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
278+
if [ -n "${LTP_POD}" ]; then
279+
kubectl logs -n "${NAMESPACE}" "${LTP_POD}" --tail=30 2>/dev/null || echo "Cannot get logs"
280+
else
281+
echo "No lifecycle transition processor pod"
282+
fi
283+
echo ""
284+
285+
echo "=== CONFIGMAPS ==="
286+
echo "Sorbet-related ConfigMaps:"
287+
kubectl get configmaps -n "${NAMESPACE}" -l "app.kubernetes.io/instance=${ZENKO_NAME}" -o json | \
288+
jq '.items[] | select(.metadata.name | contains("sorbet")) | {name: .metadata.name, keys: (.data | keys)}' || echo "No sorbet configmaps"
289+
echo ""
290+
291+
echo "=== RECENT EVENTS (last 50) ==="
292+
kubectl get events -n "${NAMESPACE}" --sort-by='.lastTimestamp' | tail -50 || echo "No events"
293+
echo ""
294+
295+
echo "=================================================="
296+
echo "END OF DUMP"
297+
echo "=================================================="
298+

.github/workflows/end2end.yaml

Lines changed: 21 additions & 1 deletion
@@ -517,11 +517,24 @@ jobs:
           ZENKO_NAME: "${{ env.ZENKO_NAME }}-pra"
           SETUP_IMAGE: ${{ needs.build-setup-image.outputs.image }}
           GIT_ACCESS_TOKEN: ${{ steps.app-token.outputs.token }}
-        run: ./setup-tests.sh --kubeconfig ~/.kube/config -- all --no-kafka-topics --no-rbac --no-dns --no-mocks --no-metadata
+        run: ./setup-tests.sh --kubeconfig ~/.kube/config -- setup --locations --accounts --workflows --notifications
         working-directory: ./tests/@setup
       - name: Configure hosts file
         run: bash configure-hosts.sh
         working-directory: ./.github/scripts/end2end
+      - name: Dump setup state for PRIMARY instance
+        run: bash dump-setup-state.sh default end2end > /tmp/v2-primary-dump.txt 2>&1
+        working-directory: ./.github/scripts/end2end
+        continue-on-error: true
+      - name: Dump setup state for PRA instance
+        run: bash dump-setup-state.sh default end2end-pra > /tmp/v2-pra-dump.txt 2>&1
+        working-directory: ./.github/scripts/end2end
+        continue-on-error: true
+      - name: Show diagnostic dumps
+        run: |
+          echo "=== PRIMARY INSTANCE DUMP ===" && cat /tmp/v2-primary-dump.txt
+          echo "=== PRA INSTANCE DUMP ===" && cat /tmp/v2-pra-dump.txt
+        continue-on-error: true
       - name: Run CTST end to end tests
         env:
           E2E_CTST_IMAGE: ${{ env.E2E_CTST_IMAGE_NAME }}:${{ env.E2E_IMAGE_TAG }}
@@ -727,6 +740,13 @@ jobs:
       - name: Configure hosts file
         run: bash configure-hosts.sh
         working-directory: ./.github/scripts/end2end
+      - name: Dump setup state before tests
+        run: bash dump-setup-state.sh default end2end > /tmp/v2-ctst-sharded-dump.txt 2>&1
+        working-directory: ./.github/scripts/end2end
+        continue-on-error: true
+      - name: Show diagnostic dump
+        run: cat /tmp/v2-ctst-sharded-dump.txt
+        continue-on-error: true
       - name: Run CTST end to end tests
         env:
           E2E_CTST_IMAGE: ${{ env.E2E_CTST_IMAGE_NAME }}:${{ env.E2E_IMAGE_TAG }}

tests/@setup/src/accounts.ts

Lines changed: 2 additions & 1 deletion
@@ -17,6 +17,7 @@ import { join } from 'path';
 
 export interface AccountOptions {
     namespace: string;
+    zenkoName?: string;
     accounts?: string[];
     configFile?: string;
 }
@@ -75,7 +76,7 @@ export async function setupAccounts(options: AccountOptions): Promise<void> {
     });
 
     logger.info('Connecting to management API...');
-    const managementEndpoint = await getManagementEndpoint();
+    const managementEndpoint = await getManagementEndpoint(options.zenkoName, namespace);
     const authToken = await getManagementToken();
 
     for (const accountConfig of accountsToCreate) {
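
A hypothetical call site for the new option, sketching how a PRA setup run could target its own instance; the argument values below are illustrative assumptions, not part of this commit:

// Illustrative fragment, inside an async setup routine.
await setupAccounts({
    namespace: 'default',
    zenkoName: 'end2end-pra', // optional; omitting it presumably keeps the previous default-endpoint behaviour
});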
