@@ -75,7 +75,7 @@ static inline uint64_t host_time_ns()
  * 'scale_factor' so the entire boot completes in SEMU_BOOT_TARGET_TIME
  * seconds.
  */
-static void measure_bogomips_ns(uint64_t iterations)
+static void measure_bogomips_ns(uint64_t iterations, int hart_count)
 {
     /* Call the host HRT 'iterations' times.
      *
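
For context, a minimal sketch of what such a calibration loop can look like. Only host_time_ns() and the elapsed_ns_1/elapsed_ns_2 readings appear in this patch; timing runs of N and 2N calls so that fixed loop overhead cancels is an assumption for illustration:

    /* Sketch: the 2N-run minus the N-run leaves ~N calls' worth of
     * time, so dividing by 'iterations' yields ns per call. */
    static double calibrate_ns_per_call(uint64_t iterations)
    {
        uint64_t t0 = host_time_ns();
        for (uint64_t i = 0; i < iterations; i++)
            (void) host_time_ns();
        uint64_t elapsed_ns_1 = host_time_ns() - t0;

        t0 = host_time_ns();
        for (uint64_t i = 0; i < 2 * iterations; i++)
            (void) host_time_ns();
        uint64_t elapsed_ns_2 = host_time_ns() - t0;

        return (double) (elapsed_ns_2 - elapsed_ns_1) / (double) iterations;
    }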
@@ -114,13 +114,14 @@ static void measure_bogomips_ns(uint64_t iterations)
         (double) (elapsed_ns_2 - elapsed_ns_1) / (double) iterations;
 
     /* 'semu_timer_clocksource' is called ~2e8 times per SMP. Each call's
-     * overhead ~ ns_per_call. The total overhead is ~ ns_per_call * SMP * 2e8.
-     * That overhead is about 10% of the entire boot, so effectively:
+     * overhead ~ ns_per_call. The total overhead is ~ ns_per_call * SMP *
+     * 2e8. That overhead is about 10~40% of the entire boot; we take the
+     * minimum here for more fault tolerance. Thus, effectively:
      * predict_sec = ns_per_call * SMP * 2e8 * (100%/10%) / 1e9
      *             = ns_per_call * SMP * 2.0
      * Then scale_factor = (desired_time) / (predict_sec).
      */
-    const double predict_sec = ns_per_call * SEMU_SMP * 2.0;
+    const double predict_sec = ns_per_call * hart_count * 2.0;
     scale_factor = SEMU_BOOT_TARGET_TIME / predict_sec;
 }
 
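The constant 2.0 folds the arithmetic from the comment above: 2e8 * (100%/10%) / 1e9 = 2e8 * 10 / 1e9 = 2.0. A worked example with illustrative numbers (ns_per_call, hart_count, and the target time below are assumptions, not values from this patch):

    /* ns_per_call = 25 ns, hart_count = 4:
     *   predict_sec  = 25 * 4 * 2.0 = 200 seconds
     * with SEMU_BOOT_TARGET_TIME = 10:
     *   scale_factor = 10 / 200 = 0.05
     */
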
@@ -188,14 +189,13 @@ static uint64_t semu_timer_clocksource(semu_timer_t *timer)
 #endif
 }
 
-void semu_timer_init(semu_timer_t *timer, uint64_t freq)
+void semu_timer_init(semu_timer_t *timer, uint64_t freq, int hart_count)
 {
     /* Measure how long each call to 'host_time_ns()' roughly takes,
      * then use that to pick 'scale_factor'. For example, pass freq
      * as the loop count or some large number to get a stable measure.
      */
-    measure_bogomips_ns(freq);
-
+    measure_bogomips_ns(freq, hart_count);
     timer->freq = freq;
     semu_timer_rebase(timer, 0);
 }
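
Callers of semu_timer_init() must now thread the configured hart count through. A hypothetical call site (the frequency and hart count below are illustrative, not taken from this patch):

    semu_timer_t mtimer;
    semu_timer_init(&mtimer, 65000000 /* freq, Hz */, 4 /* hart_count */);

Deriving predict_sec from the runtime hart_count rather than the compile-time SEMU_SMP keeps the boot-time estimate in step with the actual SMP configuration.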