tiny fix
Former-commit-id: 7451b2ae7e58d0f1857f01a037672a8c53b1bd0d
@@ -173,7 +173,7 @@ class LlamaFlashAttention2(LlamaAttention):
 state = state.reshape(bsz * num_group, group_size, self.num_heads, self.head_dim)

 if attention_mask is not None:
-    logger.warning_once("Padded sequences are less efficient.")
+    logger.warning_once("Padded sequences are less efficient in FlashAttention.")
     batch_size = query_states.shape[0]
     # -q_len: assumes left padding
     unpadded_q, indices_q, cu_seqlens_q, max_seqlen_q = unpad_input(query_states, attention_mask[:, -q_len:])
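For context on the padded branch touched by this hunk: when an attention_mask is present, the tensors are stripped of padding tokens with unpad_input, run through the variable-length FlashAttention kernel, and scattered back to the padded layout. The slice attention_mask[:, -q_len:] keeps only the mask columns aligned with the current queries, which assumes left padding. Below is a minimal sketch of that path, assuming a flash-attn 2.x release where unpad_input returns four values; the helper name and tensor arguments are illustrative, not the repository's exact code.

import torch
from flash_attn import flash_attn_varlen_func
from flash_attn.bert_padding import unpad_input, pad_input

def flash_attention_with_padding(query_states, key_states, value_states, attention_mask, q_len):
    # query/key/value_states: (batch, seq_len, num_heads, head_dim); attention_mask: (batch, kv_len) of 0/1.
    batch_size = query_states.shape[0]

    # Strip padding; -q_len keeps the mask columns aligned with the queries (left padding assumed).
    unpadded_q, indices_q, cu_seqlens_q, max_seqlen_q = unpad_input(query_states, attention_mask[:, -q_len:])
    unpadded_k, _, cu_seqlens_k, max_seqlen_k = unpad_input(key_states, attention_mask)
    unpadded_v, _, _, _ = unpad_input(value_states, attention_mask)

    # Variable-length FlashAttention over the packed (total_tokens, num_heads, head_dim) tensors.
    attn_output_unpad = flash_attn_varlen_func(
        unpadded_q, unpadded_k, unpadded_v,
        cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k,
        max_seqlen_q=max_seqlen_q, max_seqlen_k=max_seqlen_k,
        dropout_p=0.0, causal=True,
    )

    # Scatter the packed outputs back to the padded (batch, q_len, num_heads, head_dim) layout.
    return pad_input(attn_output_unpad, indices_q, batch_size, q_len)

This unpad/repad round trip is the extra work behind the warning message changed in this commit: fully packed batches skip it and call the dense FlashAttention kernel directly.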