//
// Generated by NVIDIA NVVM Compiler
//
// Compiler Build ID: CL-31678015
// Cuda compilation tools, release 11.8, V11.8.85
// Based on NVVM 7.0.1
//

.version 7.8
.target sm_60
.address_size 64

    // .globl _Z17dequantize_matrix13DequantParams

.visible .entry _Z17dequantize_matrix13DequantParams(
    .param .align 8 .b8 _Z17dequantize_matrix13DequantParams_param_0[64]
)
{
    .reg .pred %p<6>;
    .reg .b16 %rs<118>;
    .reg .f32 %f<35>;
    .reg .b32 %r<90>;
    .reg .b64 %rd<17>;

    ld.param.v2.u32 {%r15, %r16}, [_Z17dequantize_matrix13DequantParams_param_0+48];
    ld.param.v2.u32 {%r19, %r20}, [_Z17dequantize_matrix13DequantParams_param_0+40];
    ld.param.v4.u8 {%rs48, %rs49, %rs50, %rs51}, [_Z17dequantize_matrix13DequantParams_param_0+60];
    ld.param.u32 %r13, [_Z17dequantize_matrix13DequantParams_param_0+56];
    ld.param.u64 %rd5, [_Z17dequantize_matrix13DequantParams_param_0+24];
    ld.param.u64 %rd4, [_Z17dequantize_matrix13DequantParams_param_0+16];
    ld.param.u64 %rd3, [_Z17dequantize_matrix13DequantParams_param_0+8];
    ld.param.u64 %rd2, [_Z17dequantize_matrix13DequantParams_param_0];
    mov.u32 %r21, %ntid.x;
    mov.u32 %r22, %ctaid.x;
    mov.u32 %r23, %tid.x;
    mad.lo.s32 %r24, %r22, %r21, %r23;
    shl.b32 %r1, %r24, 3;
    mov.u32 %r25, %ntid.y;
    mov.u32 %r26, %ctaid.y;
    mov.u32 %r27, %tid.y;
    mad.lo.s32 %r2, %r26, %r25, %r27;
    mad.lo.s32 %r28, %r15, %r2, %r1;
    shr.u32 %r29, %r28, 31;
    add.s32 %r30, %r28, %r29;
    shr.s32 %r31, %r30, 1;
    cvt.s64.s32 %rd7, %r31;
    add.s64 %rd6, %rd3, %rd7;
    // begin inline asm
    ld.global.cs.u32 %r14, [%rd6];
    // end inline asm
    cvt.u16.u32 %rs1, %r14;
    and.b16 %rs2, %rs1, 15;
    shr.u32 %r32, %r14, 4;
    cvt.u16.u32 %rs3, %r32;
    and.b16 %rs4, %rs3, 15;
    shr.u32 %r33, %r14, 8;
    cvt.u16.u32 %rs5, %r33;
    and.b16 %rs6, %rs5, 15;
    shr.u32 %r34, %r14, 12;
    cvt.u16.u32 %rs7, %r34;
    and.b16 %rs8, %rs7, 15;
    shr.u32 %r35, %r14, 16;
    cvt.u16.u32 %rs9, %r35;
    and.b16 %rs10, %rs9, 15;
    shr.u32 %r36, %r14, 20;
    cvt.u16.u32 %rs11, %r36;
    and.b16 %rs12, %rs11, 15;
    shr.u32 %r37, %r14, 24;
    cvt.u16.u32 %rs13, %r37;
    and.b16 %rs14, %rs13, 15;
    shr.u32 %r4, %r14, 28;
    div.s32 %r5, %r1, %r19;
    cvta.to.global.u64 %rd8, %rd4;
    mad.lo.s32 %r38, %r16, %r2, %r5;
    mul.wide.s32 %rd9, %r38, 2;
    add.s64 %rd10, %rd8, %rd9;
    ld.global.u16 %rs47, [%rd10];
    // begin inline asm
    { cvt.f32.f16 %f2, %rs47;}
    // end inline asm
    shl.b16 %rs109, %rs48, 3;
    setp.eq.s64 %p1, %rd5, 0;
    @%p1 bra $L__BB0_2;

    mad.lo.s32 %r39, %r13, %r2, %r5;
    shr.s32 %r40, %r39, 1;
    cvt.s64.s32 %rd11, %r40;
    cvta.to.global.u64 %rd12, %rd5;
    add.s64 %rd13, %rd12, %rd11;
    ld.global.u8 %r41, [%rd13];
    shl.b32 %r42, %r39, 2;
    and.b32 %r43, %r42, 4;
    shr.u32 %r44, %r41, %r43;
    cvt.u16.u32 %rs52, %r44;
    and.b16 %rs109, %rs52, 15;

$L__BB0_2:
    shr.u16 %rs53, %rs109, 3;
    and.b16 %rs54, %rs53, 1;
    setp.eq.b16 %p2, %rs54, 1;
    setp.eq.s16 %p3, %rs48, 0;
    and.pred %p4, %p3, %p2;
    selp.b16 %rs56, -16, 0, %p4;
    or.b16 %rs57, %rs56, %rs109;
    cvt.u32.u16 %r45, %rs57;
    cvt.s32.s8 %r6, %r45;
    @%p3 bra $L__BB0_4;

    cvt.u32.u16 %r46, %rs2;
    sub.s32 %r47, %r46, %r6;
    cvt.rn.f32.s32 %f11, %r47;
    mul.ftz.f32 %f3, %f2, %f11;
    // begin inline asm
    { cvt.rn.f16.f32 %rs110, %f3;}
    // end inline asm
    cvt.u32.u16 %r48, %rs4;
    sub.s32 %r49, %r48, %r6;
    cvt.rn.f32.s32 %f12, %r49;
    mul.ftz.f32 %f4, %f2, %f12;
    // begin inline asm
    { cvt.rn.f16.f32 %rs111, %f4;}
    // end inline asm
    cvt.u32.u16 %r50, %rs6;
    sub.s32 %r51, %r50, %r6;
    cvt.rn.f32.s32 %f13, %r51;
    mul.ftz.f32 %f5, %f2, %f13;
    // begin inline asm
    { cvt.rn.f16.f32 %rs112, %f5;}
    // end inline asm
    cvt.u32.u16 %r52, %rs8;
    sub.s32 %r53, %r52, %r6;
    cvt.rn.f32.s32 %f14, %r53;
    mul.ftz.f32 %f6, %f2, %f14;
    // begin inline asm
    { cvt.rn.f16.f32 %rs113, %f6;}
    // end inline asm
    cvt.u32.u16 %r54, %rs10;
    sub.s32 %r55, %r54, %r6;
    cvt.rn.f32.s32 %f15, %r55;
    mul.ftz.f32 %f7, %f2, %f15;
    // begin inline asm
    { cvt.rn.f16.f32 %rs114, %f7;}
    // end inline asm
    cvt.u32.u16 %r56, %rs12;
    sub.s32 %r57, %r56, %r6;
    cvt.rn.f32.s32 %f16, %r57;
    mul.ftz.f32 %f8, %f2, %f16;
    // begin inline asm
    { cvt.rn.f16.f32 %rs115, %f8;}
    // end inline asm
    cvt.u32.u16 %r58, %rs14;
    sub.s32 %r59, %r58, %r6;
    cvt.rn.f32.s32 %f17, %r59;
    mul.ftz.f32 %f9, %f2, %f17;
    // begin inline asm
    { cvt.rn.f16.f32 %rs116, %f9;}
    // end inline asm
    sub.s32 %r60, %r4, %r6;
    cvt.rn.f32.s32 %f18, %r60;
    mul.ftz.f32 %f10, %f2, %f18;
    // begin inline asm
    { cvt.rn.f16.f32 %rs117, %f10;}
    // end inline asm
    bra.uni $L__BB0_5;

$L__BB0_4:
    shl.b16 %rs74, %rs1, 4;
    cvt.s16.s8 %rs75, %rs74;
    shr.u16 %rs76, %rs75, 7;
    and.b16 %rs77, %rs76, 240;
    or.b16 %rs78, %rs77, %rs2;
    cvt.u32.u16 %r61, %rs78;
    cvt.s32.s8 %r62, %r61;
    sub.s32 %r63, %r62, %r6;
    cvt.rn.f32.s32 %f27, %r63;
    mul.ftz.f32 %f19, %f2, %f27;
    // begin inline asm
    { cvt.rn.f16.f32 %rs110, %f19;}
    // end inline asm
    shl.b16 %rs79, %rs3, 4;
    cvt.s16.s8 %rs80, %rs79;
    shr.u16 %rs81, %rs80, 7;
    and.b16 %rs82, %rs81, 240;
    or.b16 %rs83, %rs82, %rs4;
    cvt.u32.u16 %r64, %rs83;
    cvt.s32.s8 %r65, %r64;
    sub.s32 %r66, %r65, %r6;
    cvt.rn.f32.s32 %f28, %r66;
    mul.ftz.f32 %f20, %f2, %f28;
    // begin inline asm
    { cvt.rn.f16.f32 %rs111, %f20;}
    // end inline asm
    shl.b16 %rs84, %rs5, 4;
    cvt.s16.s8 %rs85, %rs84;
    shr.u16 %rs86, %rs85, 7;
    and.b16 %rs87, %rs86, 240;
    or.b16 %rs88, %rs87, %rs6;
    cvt.u32.u16 %r67, %rs88;
    cvt.s32.s8 %r68, %r67;
    sub.s32 %r69, %r68, %r6;
    cvt.rn.f32.s32 %f29, %r69;
    mul.ftz.f32 %f21, %f2, %f29;
    // begin inline asm
    { cvt.rn.f16.f32 %rs112, %f21;}
    // end inline asm
    shl.b16 %rs89, %rs7, 4;
    cvt.s16.s8 %rs90, %rs89;
    shr.u16 %rs91, %rs90, 7;
    and.b16 %rs92, %rs91, 240;
    or.b16 %rs93, %rs92, %rs8;
    cvt.u32.u16 %r70, %rs93;
    cvt.s32.s8 %r71, %r70;
    sub.s32 %r72, %r71, %r6;
    cvt.rn.f32.s32 %f30, %r72;
    mul.ftz.f32 %f22, %f2, %f30;
    // begin inline asm
    { cvt.rn.f16.f32 %rs113, %f22;}
    // end inline asm
    shl.b16 %rs94, %rs9, 4;
    cvt.s16.s8 %rs95, %rs94;
    shr.u16 %rs96, %rs95, 7;
    and.b16 %rs97, %rs96, 240;
    or.b16 %rs98, %rs97, %rs10;
    cvt.u32.u16 %r73, %rs98;
    cvt.s32.s8 %r74, %r73;
    sub.s32 %r75, %r74, %r6;
    cvt.rn.f32.s32 %f31, %r75;
    mul.ftz.f32 %f23, %f2, %f31;
    // begin inline asm
    { cvt.rn.f16.f32 %rs114, %f23;}
    // end inline asm
    shl.b16 %rs99, %rs11, 4;
    cvt.s16.s8 %rs100, %rs99;
    shr.u16 %rs101, %rs100, 7;
    and.b16 %rs102, %rs101, 240;
    or.b16 %rs103, %rs102, %rs12;
    cvt.u32.u16 %r76, %rs103;
    cvt.s32.s8 %r77, %r76;
    sub.s32 %r78, %r77, %r6;
    cvt.rn.f32.s32 %f32, %r78;
    mul.ftz.f32 %f24, %f2, %f32;
    // begin inline asm
    { cvt.rn.f16.f32 %rs115, %f24;}
    // end inline asm
    shl.b16 %rs104, %rs13, 4;
    cvt.s16.s8 %rs105, %rs104;
    shr.u16 %rs106, %rs105, 7;
    and.b16 %rs107, %rs106, 240;
    or.b16 %rs108, %rs107, %rs14;
    cvt.u32.u16 %r79, %rs108;
    cvt.s32.s8 %r80, %r79;
    sub.s32 %r81, %r80, %r6;
    cvt.rn.f32.s32 %f33, %r81;
    mul.ftz.f32 %f25, %f2, %f33;
    // begin inline asm
    { cvt.rn.f16.f32 %rs116, %f25;}
    // end inline asm
    setp.gt.s32 %p5, %r14, -1;
    selp.b32 %r82, 0, -16, %p5;
    or.b32 %r83, %r82, %r4;
    sub.s32 %r84, %r83, %r6;
    cvt.rn.f32.s32 %f34, %r84;
    mul.ftz.f32 %f26, %f2, %f34;
    // begin inline asm
    { cvt.rn.f16.f32 %rs117, %f26;}
    // end inline asm

$L__BB0_5:
    mad.lo.s32 %r85, %r20, %r2, %r1;
    cvta.to.global.u64 %rd14, %rd2;
    mul.wide.s32 %rd15, %r85, 2;
    add.s64 %rd16, %rd14, %rd15;
    mov.b32 %r86, {%rs116, %rs117};
    mov.b32 %r87, {%rs114, %rs115};
    mov.b32 %r88, {%rs112, %rs113};
    mov.b32 %r89, {%rs110, %rs111};
    st.global.v4.u32 [%rd16], {%r89, %r88, %r87, %r86};
    ret;

}
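// ----------------------------------------------------------------------------
// NOTE: the CUDA source below is NOT part of the generated PTX above. It is a
// rough, hand-written sketch of a kernel that could plausibly lower to PTX of
// this shape: each thread dequantizes eight packed 4-bit values against a
// per-group fp16 scale and an optional packed 4-bit zero point, then writes
// eight fp16 results with a single 16-byte vector store. The struct name comes
// from the mangled entry point; every field name and the exact parameter
// layout are assumptions inferred from the parameter-load offsets, not taken
// from any original source. Strides are assumed to keep the 32-bit loads and
// 16-byte stores naturally aligned.
// ----------------------------------------------------------------------------
#include <cuda_fp16.h>
#include <cstdint>

struct DequantParams {              // 64-byte kernel parameter block (assumed layout)
    half*          out;             // +0  dequantized output matrix (fp16)
    const uint8_t* qweight;         // +8  packed 4-bit values, two per byte
    const half*    scales;          // +16 one fp16 scale per group
    const uint8_t* zeros;           // +24 packed 4-bit zero points, may be nullptr
    uint64_t       unused;          // +32 not read by the kernel
    int            group_size;      // +40 columns sharing one scale / zero point
    int            out_stride;      // +44 output row stride, in elements
    int            qweight_stride;  // +48 qweight row stride, in 4-bit elements
    int            scale_stride;    // +52 scales row stride, in elements
    int            zero_stride;     // +56 zeros row stride, in 4-bit elements
    uint8_t        is_unsigned;     // +60 nonzero: unsigned nibbles, default zero point 8
    uint8_t        pad[3];          // +61..63
};

__global__ void dequantize_matrix(DequantParams p)
{
    // Each thread handles one 32-bit word of qweight, i.e. eight 4-bit values.
    int col = (blockIdx.x * blockDim.x + threadIdx.x) * 8;
    int row = blockIdx.y * blockDim.y + threadIdx.y;

    // Streaming 32-bit load of the packed nibbles (the ld.global.cs in the PTX).
    const uint32_t* qptr = reinterpret_cast<const uint32_t*>(
        p.qweight + (p.qweight_stride * row + col) / 2);
    uint32_t packed = __ldcs(qptr);

    int   group = col / p.group_size;
    float scale = __half2float(p.scales[p.scale_stride * row + group]);

    // Zero point: defaults to 8 for the unsigned encoding and 0 for the signed
    // one, overridden by the packed zero-point table when one is supplied.
    int zero = p.is_unsigned ? 8 : 0;
    if (p.zeros != nullptr) {
        int idx = p.zero_stride * row + group;
        zero = (p.zeros[idx / 2] >> ((idx & 1) * 4)) & 0xF;
    }
    if (!p.is_unsigned && (zero & 0x8))
        zero -= 16;                 // signed mode sign-extends the zero point

    alignas(16) half result[8];
    for (int i = 0; i < 8; ++i) {
        int q = (packed >> (4 * i)) & 0xF;
        if (!p.is_unsigned && (q & 0x8))
            q -= 16;                // signed mode sign-extends each nibble
        result[i] = __float2half_rn(scale * (q - zero));
    }

    // One vectorized 16-byte store (the st.global.v4.u32 in the PTX).
    *reinterpret_cast<uint4*>(p.out + p.out_stride * row + col) =
        *reinterpret_cast<const uint4*>(result);
}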