@@ -2572,4 +2572,99 @@ describe.skip("InferenceClient", () => {
 		},
 		TIMEOUT
 	);
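+	// SiliconFlow: chat completion and streaming smoke tests for the two DeepSeek models.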
+	describe.concurrent(
+		"SiliconFlow",
+		() => {
+			const client = new InferenceClient(env.HF_SILICONFLOW_KEY ?? "dummy");
+
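+			// Register the SiliconFlow mapping for both models up front, so the
+			// tests don't depend on resolving provider mappings from the Hub.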
+			HARDCODED_MODEL_INFERENCE_MAPPING["siliconflow"] = {
+				"deepseek-ai/DeepSeek-R1": {
+					provider: "siliconflow",
+					hfModelId: "deepseek-ai/DeepSeek-R1",
+					providerId: "deepseek-ai/DeepSeek-R1",
+					status: "live",
+					task: "conversational",
+				},
+				"deepseek-ai/DeepSeek-V3": {
+					provider: "siliconflow",
+					hfModelId: "deepseek-ai/DeepSeek-V3",
+					providerId: "deepseek-ai/DeepSeek-V3",
+					status: "live",
+					task: "conversational",
+				},
+			};
+
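+			// Guarded assertions: only check the completion text when the response
+			// actually includes choices.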
+			it("chatCompletion - DeepSeek-R1", async () => {
+				const res = await client.chatCompletion({
+					model: "deepseek-ai/DeepSeek-R1",
+					provider: "siliconflow",
+					messages: [{ role: "user", content: "What is the capital of France?" }],
+					max_tokens: 20,
+				});
+				if (res.choices && res.choices.length > 0) {
+					const completion = res.choices[0].message?.content;
+					expect(completion).toBeDefined();
+					expect(typeof completion).toBe("string");
+					expect(completion).toMatch(/Paris/i);
+				}
+			});
+
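+			// Unlike the R1 test above, assert unconditionally that choices and
+			// non-empty content come back.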
+			it("chatCompletion - DeepSeek-V3", async () => {
+				const res = await client.chatCompletion({
+					model: "deepseek-ai/DeepSeek-V3",
+					provider: "siliconflow",
+					messages: [{ role: "user", content: "The weather today is" }],
+					max_tokens: 10,
+				});
+				expect(res.choices).toBeDefined();
+				expect(res.choices?.length).toBeGreaterThan(0);
+				expect(res.choices?.[0].message?.content).toBeDefined();
+				expect(typeof res.choices?.[0].message?.content).toBe("string");
+				expect(res.choices?.[0].message?.content?.length).toBeGreaterThan(0);
+			});
+
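+			// Accumulate streamed delta chunks into one string before asserting.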
+			it("chatCompletion stream - DeepSeek-R1", async () => {
+				const stream = client.chatCompletionStream({
+					model: "deepseek-ai/DeepSeek-R1",
+					provider: "siliconflow",
+					messages: [{ role: "user", content: "Say 'this is a test'" }],
+					stream: true,
+				}) as AsyncGenerator<ChatCompletionStreamOutput>;
+
+				let fullResponse = "";
+				for await (const chunk of stream) {
+					if (chunk.choices && chunk.choices.length > 0) {
+						const content = chunk.choices[0].delta?.content;
+						if (content) {
+							fullResponse += content;
+						}
+					}
+				}
+				expect(fullResponse).toBeTruthy();
+				expect(fullResponse.length).toBeGreaterThan(0);
+			});
+
+			it("chatCompletion stream - DeepSeek-V3", async () => {
+				const stream = client.chatCompletionStream({
+					model: "deepseek-ai/DeepSeek-V3",
+					provider: "siliconflow",
+					messages: [{ role: "user", content: "Say 'this is a test'" }],
+					stream: true,
+				}) as AsyncGenerator<ChatCompletionStreamOutput>;
+
+				let fullResponse = "";
+				for await (const chunk of stream) {
+					if (chunk.choices && chunk.choices.length > 0) {
+						const content = chunk.choices[0].delta?.content;
+						if (content) {
+							fullResponse += content;
+						}
+					}
+				}
+				expect(fullResponse).toBeTruthy();
+				expect(fullResponse.length).toBeGreaterThan(0);
+			});
+		},
+		TIMEOUT
+	);
 });