@@ -744,11 +744,13 @@ def __call__(
             )
 
         if negative_prompt_embeds_qwen is None:
-            negative_prompt_embeds_qwen, negative_prompt_embeds_clip, negative_cu_seqlens = self.encode_prompt(
-                prompt=negative_prompt,
-                max_sequence_length=max_sequence_length,
-                device=device,
-                dtype=dtype,
+            negative_prompt_embeds_qwen, negative_prompt_embeds_clip, negative_prompt_cu_seqlens = (
+                self.encode_prompt(
+                    prompt=negative_prompt,
+                    max_sequence_length=max_sequence_length,
+                    device=device,
+                    dtype=dtype,
+                )
             )
 
         # 4. Prepare timesteps
@@ -780,8 +782,8 @@ def __call__(
         text_rope_pos = torch.arange(prompt_cu_seqlens.diff().max().item(), device=device)
 
         negative_text_rope_pos = (
-            torch.arange(negative_cu_seqlens.diff().max().item(), device=device)
-            if negative_cu_seqlens is not None
+            torch.arange(negative_prompt_cu_seqlens.diff().max().item(), device=device)
+            if negative_prompt_cu_seqlens is not None
             else None
         )
 
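For context, `prompt_cu_seqlens` and `negative_prompt_cu_seqlens` are cumulative sequence lengths for packed prompt batches, and `cu_seqlens.diff().max()` is the length of the longest prompt, which bounds how many text RoPE position ids are needed. The sketch below uses an illustrative tensor (not a value produced by the pipeline) to show the pattern used in the changed lines.

```python
import torch

# Illustrative cumulative sequence lengths for a packed batch of three
# prompts of lengths 5, 9, and 7 (example values only).
prompt_cu_seqlens = torch.tensor([0, 5, 14, 21])

# diff() recovers the per-prompt lengths; max() picks the longest one.
max_len = prompt_cu_seqlens.diff().max().item()  # 9

# Position ids 0..max_len-1 cover the longest prompt in the batch.
text_rope_pos = torch.arange(max_len)

print(max_len)        # 9
print(text_rope_pos)  # tensor([0, 1, 2, 3, 4, 5, 6, 7, 8])
```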