diff --git a/pkg/scheduler/core/generic_scheduler_test.go b/pkg/scheduler/core/generic_scheduler_test.go
index cdc2dee2c218..f78fd4b53b3c 100644
--- a/pkg/scheduler/core/generic_scheduler_test.go
+++ b/pkg/scheduler/core/generic_scheduler_test.go
@@ -17,6 +17,8 @@ limitations under the License.
 package core
 
 import (
+	"strconv"
+	"strings"
 	"testing"
 
 	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
@@ -25,19 +27,44 @@ import (
 	"github.com/karmada-io/karmada/test/helper"
 )
 
-// Test Case of even distribution of replicas, case 1
-// 1. create deployment (replicas=3), weight=1:1
-// 2. check two member cluster replicas, should be 2:1 or 1:2
-func Test_genericScheduler_AssignReplicas(t *testing.T) {
-	tests := []struct {
-		name      string
-		clusters  []*clusterv1alpha1.Cluster
-		placement *policyv1alpha1.Placement
-		object    *workv1alpha2.ResourceBindingSpec
-		wants     [][]workv1alpha2.TargetCluster
-		wantErr   bool
-	}{
+type testcase struct {
+	name      string
+	clusters  []*clusterv1alpha1.Cluster
+	object    *workv1alpha2.ResourceBindingSpec
+	placement *policyv1alpha1.Placement
+	// changeCondForNextSchedule is set by cases that schedule twice; it changes the schedule condition before the second schedule
+	changeCondForNextSchedule func(tt *testcase)
+	// wants maps each acceptable first-schedule result to the acceptable second-schedule results
+	wants   map[string][]string
+	wantErr bool
+}
+
+var clusterToIndex = map[string]int{
+	ClusterMember1: 0,
+	ClusterMember2: 1,
+	ClusterMember3: 2,
+	ClusterMember4: 3,
+}
+
+// isScheduleResultEqual formats a []workv1alpha2.TargetCluster as a string such as "1:1:1" and compares it with the expected string
+func isScheduleResultEqual(tcs []workv1alpha2.TargetCluster, expect string) bool {
+	res := make([]string, len(tcs))
+	for _, cluster := range tcs {
+		idx := clusterToIndex[cluster.Name]
+		res[idx] = strconv.Itoa(int(cluster.Replicas))
+	}
+	actual := strings.Join(res, ":")
+	return actual == expect
+}
+
+// These are the acceptance test cases provided by QA for the requirement: dividing replicas by static weight evenly
+// https://github.com/karmada-io/karmada/issues/4220
+func Test_EvenDistributionOfReplicas(t *testing.T) {
+	tests := []*testcase{
 		{
+			// Test Case No.1 of even distribution of replicas
+			// 1. create deployment (replicas=3), weight=1:1
+			// 2. check two member cluster replicas, should be 2:1 or 1:2
 			name: "replica 3, static weighted 1:1",
 			clusters: []*clusterv1alpha1.Cluster{
 				helper.NewCluster(ClusterMember1),
@@ -58,32 +85,85 @@ func Test_genericScheduler_AssignReplicas(t *testing.T) {
 					},
 				},
 			},
-			wants: [][]workv1alpha2.TargetCluster{
-				{
-					{Name: ClusterMember1, Replicas: 1},
-					{Name: ClusterMember2, Replicas: 2},
-				},
-				{
-					{Name: ClusterMember1, Replicas: 2},
-					{Name: ClusterMember2, Replicas: 1},
+			changeCondForNextSchedule: nil,
+			wants: map[string][]string{
+				"1:2": {},
+				"2:1": {},
+			},
+			wantErr: false,
+		},
+		{
+			// Test Case No.2 of even distribution of replicas
+			// 1. create deployment (replicas=3), weight=1:1:1
+			// 2. check three member cluster replicas, should be 1:1:1
+			// 3. update replicas from 3 to 5
+			// 4. check three member cluster replicas, should be 2:2:1 or 2:1:2 or 1:2:2
+			name: "replica 3, static weighted 1:1:1, change replicas from 3 to 5",
+			clusters: []*clusterv1alpha1.Cluster{
+				helper.NewCluster(ClusterMember1),
+				helper.NewCluster(ClusterMember2),
+				helper.NewCluster(ClusterMember3),
+			},
+			object: &workv1alpha2.ResourceBindingSpec{
+				Replicas: 3,
+			},
+			placement: &policyv1alpha1.Placement{
+				ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
+					ReplicaSchedulingType:     policyv1alpha1.ReplicaSchedulingTypeDivided,
+					ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
+					WeightPreference: &policyv1alpha1.ClusterPreferences{
+						StaticWeightList: []policyv1alpha1.StaticClusterWeight{
+							{TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 1},
+							{TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1},
+							{TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1},
+						},
+					},
 				},
 			},
+			changeCondForNextSchedule: func(tt *testcase) {
+				tt.object.Replicas = 5
+			},
+			wants: map[string][]string{
+				"1:1:1": {"2:2:1", "2:1:2", "1:2:2"},
+			},
 			wantErr: false,
 		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			g := &genericScheduler{}
+			var g = &genericScheduler{}
+			var firstScheduleResult, secondScheduleResult string
+
+			// 1. schedule for the first time, and check whether the result is one of the acceptable first-schedule results in tt.wants
 			got, err := g.assignReplicas(tt.clusters, tt.placement, tt.object)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("AssignReplicas() error = %v, wantErr %v", err, tt.wantErr)
 				return
 			}
-			if tt.wantErr {
+			for firstScheduleResult = range tt.wants {
+				if isScheduleResultEqual(got, firstScheduleResult) {
+					break
+				}
+			}
+			if !isScheduleResultEqual(got, firstScheduleResult) {
+				t.Errorf("AssignReplicas() got = %v, wants %v", got, tt.wants)
+				return
+			}
+
+			// 2. change the schedule condition for the second schedule; cases that only schedule once stop here
+			if tt.changeCondForNextSchedule == nil {
+				return
+			}
+			tt.changeCondForNextSchedule(tt)
+
+			// 3. schedule for the second time, and check whether the result is acceptable given the first-schedule result
+			got, err = g.assignReplicas(tt.clusters, tt.placement, tt.object)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("AssignReplicas() error = %v, wantErr %v", err, tt.wantErr)
 				return
 			}
-			for _, want := range tt.wants {
-				if helper.IsScheduleResultEqual(got, want) {
+			for _, secondScheduleResult = range tt.wants[firstScheduleResult] {
+				if isScheduleResultEqual(got, secondScheduleResult) {
 					return
 				}
 			}
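
For context on the comparison technique: isScheduleResultEqual canonicalizes a schedule result into a fixed-order string such as "1:1:1", so each entry in the acceptance table can name a whole outcome compactly regardless of the order in which assignReplicas returns target clusters. The following is a minimal, self-contained sketch of that encoding outside the Karmada test harness; the targetCluster type, the member1/member2/member3 names, and the format helper are stand-ins invented for illustration, not Karmada APIs.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// targetCluster is a local stand-in for workv1alpha2.TargetCluster.
type targetCluster struct {
	Name     string
	Replicas int32
}

// clusterToIndex fixes each member cluster's position in the "a:b:c" string.
var clusterToIndex = map[string]int{
	"member1": 0,
	"member2": 1,
	"member3": 2,
}

// format renders a schedule result as "a:b:c" in the fixed cluster order,
// mirroring what isScheduleResultEqual does before comparing.
func format(tcs []targetCluster) string {
	res := make([]string, len(tcs))
	for _, c := range tcs {
		res[clusterToIndex[c.Name]] = strconv.Itoa(int(c.Replicas))
	}
	return strings.Join(res, ":")
}

func main() {
	// Acceptance table for test case No.2: a first schedule of 1:1:1
	// may be followed by 2:2:1, 2:1:2, or 1:2:2 after replicas go 3 -> 5.
	wants := map[string][]string{
		"1:1:1": {"2:2:1", "2:1:2", "1:2:2"},
	}

	first := []targetCluster{
		{Name: "member1", Replicas: 1},
		{Name: "member2", Replicas: 1},
		{Name: "member3", Replicas: 1},
	}
	second := []targetCluster{
		{Name: "member1", Replicas: 2},
		{Name: "member2", Replicas: 1},
		{Name: "member3", Replicas: 2},
	}

	firstKey := format(first) // "1:1:1"
	accepted := false
	for _, want := range wants[firstKey] {
		if format(second) == want {
			accepted = true
			break
		}
	}
	fmt.Printf("first=%s second=%s accepted=%v\n", firstKey, format(second), accepted)
}

Keying the string positions by a fixed clusterToIndex order is what makes the acceptance table independent of the slice order the scheduler happens to produce.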