Commit ede2e75

Fix density test
1 parent 0400339 commit ede2e75

1 file changed: +61 -68 lines

test/e2e/density.go

Lines changed: 61 additions & 68 deletions
@@ -212,83 +212,76 @@ var _ = Describe("Density", func() {
 
     type Density struct {
         skip          bool
-        totalPods     int
         podsPerMinion int
-        rcsPerThread  int
     }
 
-    //This test should always run, even if larger densities are skipped.
-    d3 := Density{totalPods: 3, podsPerMinion: 0, rcsPerThread: 1, skip: false}
-
-    //These tests are varied and customizable.
-    //TODO (wojtek-t):don't skip d30 after #6059
-    d30 := Density{totalPods: 30, podsPerMinion: 0, rcsPerThread: 1, skip: true}
-    d50 := Density{totalPods: 50, podsPerMinion: 0, rcsPerThread: 1, skip: true}
-    d100 := Density{totalPods: 100, podsPerMinion: 0, rcsPerThread: 1, skip: true}
-    d500t5 := Density{totalPods: 500, podsPerMinion: 10, rcsPerThread: 5, skip: true}
-    d500t25 := Density{totalPods: 500, podsPerMinion: 10, rcsPerThread: 25, skip: true}
+    densityTests := []Density{
+        // This test should always run, even if larger densities are skipped.
+        {podsPerMinion: 3, skip: false},
+        // TODO (wojtek-t):don't skip d30 after #6059
+        {podsPerMinion: 30, skip: true},
+        {podsPerMinion: 50, skip: true},
+        {podsPerMinion: 100, skip: true},
+    }
 
-    dtests := []Density{d3, d30, d50, d100, d500t5, d500t25}
+    for _, testArg := range densityTests {
+        name := fmt.Sprintf("should allow starting %d pods per node", testArg.podsPerMinion)
+        if testArg.skip {
+            name = "[Skipped] " + name
+        }
+        itArg := testArg
+        It(name, func() {
+            totalPods := itArg.podsPerMinion * minionCount
+            RCName = "my-hostname-density" + strconv.Itoa(totalPods) + "-" + string(util.NewUUID())
+            RunRC(c, RCName, ns, "gcr.io/google_containers/pause:go", totalPods)
+        })
+    }
 
-    //Run each test in the array which isn't skipped.
-    for i := range dtests {
+    type Scalability struct {
+        skip          bool
+        totalPods     int
+        podsPerMinion int
+        rcsPerThread  int
+    }
 
-        //cannot do a range iterator over structs.
-        dtest := dtests[i]
+    scalabilityTests := []Scalability{
+        {totalPods: 500, podsPerMinion: 10, rcsPerThread: 5, skip: true},
+        {totalPods: 500, podsPerMinion: 10, rcsPerThread: 25, skip: true},
+    }
 
-        //if ppm==0, its a raw density test.
-        //otherwise, we continue launching n nodes per pod in threads till we meet the totalPods #.
-        if dtest.podsPerMinion == 0 {
-            //basic density tests
-            name := fmt.Sprintf("should allow starting %d pods per node", dtest.totalPods)
+    for _, testArg := range scalabilityTests {
+        // # of threads calibrate to totalPods
+        threads := (testArg.totalPods / (testArg.podsPerMinion * testArg.rcsPerThread))
 
-            if dtest.skip {
-                name = "[Skipped] " + name
-            }
-            It(name, func() {
-                glog.Info("Density test parameters: %v", dtest)
-                RCName = "my-hostname-density" + strconv.Itoa(dtest.totalPods) + "-" + string(util.NewUUID())
-                RunRC(c, RCName, ns, "gcr.io/google_containers/pause:go", dtest.totalPods)
-            })
-        } else {
-            // # of threads calibrate to totalPods
-            threads := (dtest.totalPods / (dtest.podsPerMinion * dtest.rcsPerThread))
-
-            name := fmt.Sprintf(
-                "[Skipped] should be able to launch %v pods, %v per minion, in %v rcs/thread.",
-                dtest.totalPods, dtest.podsPerMinion, dtest.rcsPerThread)
-
-            if dtest.skip {
-                name = "[Skipped] " + name
-            }
+        name := fmt.Sprintf(
+            "should be able to launch %v pods, %v per minion, in %v rcs/thread.",
+            testArg.totalPods, testArg.podsPerMinion, testArg.rcsPerThread)
+        if testArg.skip {
+            name = "[Skipped] " + name
+        }
 
+        itArg := testArg
+        It(name, func() {
             podsLaunched := 0
-            It(name, func() {
-
-                var wg sync.WaitGroup
-
-                //count down latch.., once all threads are launched, we wait for
-                //it to decrement down to zero.
-                wg.Add(threads)
-
-                //create queue of pending requests on the api server.
-                for i := 0; i < threads; i++ {
-                    go func() {
-                        // call to wg.Done will serve as a count down latch.
-                        defer wg.Done()
-                        for i := 0; i < dtest.rcsPerThread; i++ {
-                            name := "my-short-lived-pod" + string(util.NewUUID())
-                            n := dtest.podsPerMinion * minionCount
-                            RunRC(c, name, ns, "gcr.io/google_containers/pause:go", n)
-                            podsLaunched += n
-                            glog.Info("Launched %v pods so far...", podsLaunched)
-                        }
-                    }()
-                }
-                //Wait for all the pods from all the RC's to return.
-                wg.Wait()
-                glog.Info("%v pods out of %v launched", podsLaunched, dtest.totalPods)
-            })
-        }
+            var wg sync.WaitGroup
+            wg.Add(threads)
+
+            // Create queue of pending requests on the api server.
+            for i := 0; i < threads; i++ {
+                go func() {
+                    defer wg.Done()
+                    for i := 0; i < itArg.rcsPerThread; i++ {
+                        name := "my-short-lived-pod" + string(util.NewUUID())
+                        n := itArg.podsPerMinion * minionCount
+                        RunRC(c, name, ns, "gcr.io/google_containers/pause:go", n)
+                        podsLaunched += n
+                        glog.Info("Launched %v pods so far...", podsLaunched)
+                    }
+                }()
+            }
+            // Wait for all the pods from all the RC's to return.
+            wg.Wait()
+            glog.Info("%v pods out of %v launched", podsLaunched, itArg.totalPods)
+        })
     }
 })
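
A note on the `itArg := testArg` copies the new code introduces: Ginkgo registers each `It` body while the `Describe` block runs and executes those bodies only after the loop has finished. In the Go of this era (before Go 1.22), a `range` loop reused a single loop variable across iterations, so a closure capturing `testArg` directly would see the last element for every registered test. Copying into a fresh per-iteration variable gives each closure its own value. A minimal standalone sketch of the same capture behavior, with plain deferred closures standing in for Ginkgo's `It`:

    package main

    import "fmt"

    func main() {
        tests := []int{3, 30, 50, 100}

        // Stand-ins for Ginkgo It bodies: registered now, run after the loop.
        var deferred []func()

        for _, testArg := range tests {
            itArg := testArg // per-iteration copy, as in the diff above
            deferred = append(deferred, func() {
                fmt.Println("pods per node:", itArg)
            })
        }

        // Prints 3, 30, 50, 100. Capturing testArg directly (pre-Go 1.22)
        // would print 100 four times, since all closures would share one variable.
        for _, f := range deferred {
            f()
        }
    }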
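
The scalability tests use `sync.WaitGroup` as a count-down latch: `wg.Add(threads)` sets the count up front, each goroutine calls `Done` once, and `wg.Wait()` blocks until the count reaches zero. One caveat in the code as committed: `podsLaunched += n` executes concurrently in several goroutines, so the final total is subject to a data race. A race-free sketch of the same fan-out using `sync/atomic`, with a hypothetical `launch` helper standing in for `RunRC` and numbers loosely following the 500-pod, 25-RCs-per-thread configuration on a single minion:

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    // launch stands in for RunRC: pretend to start n pods and wait for them.
    func launch(n int) {}

    func main() {
        const (
            totalPods     = 500
            podsPerMinion = 10
            rcsPerThread  = 25
            minionCount   = 1
        )
        threads := totalPods / (podsPerMinion * rcsPerThread) // 2

        var podsLaunched int64
        var wg sync.WaitGroup
        wg.Add(threads) // count-down latch: one Done per goroutine

        for i := 0; i < threads; i++ {
            go func() {
                defer wg.Done()
                for j := 0; j < rcsPerThread; j++ {
                    n := podsPerMinion * minionCount
                    launch(n)
                    // Atomic add replaces the racy podsLaunched += n.
                    atomic.AddInt64(&podsLaunched, int64(n))
                }
            }()
        }

        wg.Wait() // block until every goroutine has counted down
        fmt.Printf("%v pods out of %v launched\n", atomic.LoadInt64(&podsLaunched), totalPods)
    }

Separately, note that `glog.Info` does not interpolate format verbs (it concatenates its arguments like `fmt.Sprint`); the `%v` placeholders in the logging calls would only render via `glog.Infof`.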
