params];                                // completes the ld.const begun on the previous
                                        // (unseen) line — %f5 is presumably loaded from
                                        // [params+0]; confirm against the function header
        ld.const.f32 %f6, [params+4];   // %f6 = per-input scale from the const buffer
// ----------------------------------------------------------------------
// NOTE(review): this region had been collapsed onto a few physical lines;
// in that form the "// begin inline asm" markers commented out the rest
// of each line.  Re-broken here into one statement per line — every code
// token is unchanged.
//
// Tail of the function: for eight f16 lanes x packed two-per-b32 in
// %r1, %f2, %f3, %f4 it computes
//         y = %f5 * tanh_approx(%f6 * x)
// and returns the eight f16 results repacked into four b32 words via
// func_retval0 (16 bytes).  tanh_approx(v) branches on |v|:
//   |v| <  0f3F19999A (0.6): odd polynomial in t = v^2; the coefficients
//       (≈ 0.015729, -0.052296, 0.133147, -0.333328) match a tanh
//       series/minimax fit; result r = poly(t)*v + v.
//   |v| >= 0.6: e = 2^(|v| * 0f4038AA3B); 2.885390 ≈ 2/ln2, so
//       e = exp(2|v|); r = 1 - 2/(e+1) = tanh(|v|); clamped to 1.0 once
//       |v| >= 0f41102CB4 (≈ 9.011, where f32 tanh saturates); the sign
//       of v is then restored by OR-ing its sign bit (bitwise copysign).
// ----------------------------------------------------------------------
// ---- lane 0: low half of %r1 -----------------------------------------
        mov.b32 {%rs19, %rs2}, %r1;     // %rs19 = lane0 (lo), %rs2 = lane1 (hi)
// begin inline asm
{ cvt.f32.f16 %f47, %rs19;}
// end inline asm
        mul.ftz.f32 %f7, %f47, %f6;     // x = f32(lane0) * %f6
        abs.ftz.f32 %f8, %f7;
        setp.ltu.ftz.f32 %p1, %f8, 0f3F19999A;  // |x| < 0.6 (or NaN)?
        @%p1 bra $L__BB0_2;
        bra.uni $L__BB0_1;
$L__BB0_2:                              // polynomial branch (|x| < 0.6)
        mul.ftz.f32 %f56, %f7, %f7;     // t = x^2
        mov.f32 %f57, 0fBD563CAE;       // ≈ -0.052296
        mov.f32 %f58, 0f3C80F082;       // ≈  0.015729
        fma.rn.ftz.f32 %f59, %f58, %f56, %f57;
        mov.f32 %f60, 0f3E085941;       // ≈  0.133147
        fma.rn.ftz.f32 %f61, %f59, %f56, %f60;
        mov.f32 %f62, 0fBEAAA9ED;       // ≈ -0.333328 (~ -1/3)
        fma.rn.ftz.f32 %f63, %f61, %f56, %f62;
        mov.f32 %f64, 0f00000000;
        fma.rn.ftz.f32 %f65, %f63, %f56, %f64;  // p = poly(t)*t
        fma.rn.ftz.f32 %f211, %f65, %f7, %f7;   // r = p*x + x
        bra.uni $L__BB0_3;
$L__BB0_1:                              // exponential branch (|x| >= 0.6)
        mul.ftz.f32 %f48, %f8, 0f4038AA3B;      // |x| * 2/ln2
        ex2.approx.ftz.f32 %f49, %f48;          // e = exp(2|x|)
        add.ftz.f32 %f50, %f49, 0f3F800000;     // e + 1
        mov.f32 %f51, 0f3F800000;               // 1.0
        rcp.approx.ftz.f32 %f52, %f50;          // 1/(e+1)
        mov.f32 %f53, 0fC0000000;               // -2.0
        fma.rn.ftz.f32 %f54, %f52, %f53, %f51;  // r = 1 - 2/(e+1)
        setp.ge.ftz.f32 %p2, %f8, 0f41102CB4;   // |x| >= 9.011 → tanh ≈ 1
        selp.f32 %f55, 0f3F800000, %f54, %p2;
        mov.b32 %r2, %f55;
        mov.b32 %r3, %f7;
        and.b32 %r4, %r3, -2147483648;          // isolate sign bit of x
        or.b32 %r5, %r4, %r2;                   // copysign(r, x)
        mov.b32 %f211, %r5;
$L__BB0_3:
        mul.ftz.f32 %f66, %f5, %f211;   // y0 = %f5 * tanh(x)
// begin inline asm
{ cvt.rn.f16.f32 %rs20, %f66;}
// end inline asm
// ---- lane 1: high half of %r1 (same tanh pattern as lane 0) ----------
// begin inline asm
{ cvt.f32.f16 %f67, %rs2;}
// end inline asm
        mul.ftz.f32 %f12, %f67, %f6;
        abs.ftz.f32 %f13, %f12;
        setp.ltu.ftz.f32 %p3, %f13, 0f3F19999A;
        @%p3 bra $L__BB0_5;
        bra.uni $L__BB0_4;
$L__BB0_5:                              // polynomial branch
        mul.ftz.f32 %f76, %f12, %f12;
        mov.f32 %f77, 0fBD563CAE;
        mov.f32 %f78, 0f3C80F082;
        fma.rn.ftz.f32 %f79, %f78, %f76, %f77;
        mov.f32 %f80, 0f3E085941;
        fma.rn.ftz.f32 %f81, %f79, %f76, %f80;
        mov.f32 %f82, 0fBEAAA9ED;
        fma.rn.ftz.f32 %f83, %f81, %f76, %f82;
        mov.f32 %f84, 0f00000000;
        fma.rn.ftz.f32 %f85, %f83, %f76, %f84;
        fma.rn.ftz.f32 %f212, %f85, %f12, %f12;
        bra.uni $L__BB0_6;
$L__BB0_4:                              // exponential branch
        mul.ftz.f32 %f68, %f13, 0f4038AA3B;
        ex2.approx.ftz.f32 %f69, %f68;
        add.ftz.f32 %f70, %f69, 0f3F800000;
        mov.f32 %f71, 0f3F800000;
        rcp.approx.ftz.f32 %f72, %f70;
        mov.f32 %f73, 0fC0000000;
        fma.rn.ftz.f32 %f74, %f72, %f73, %f71;
        setp.ge.ftz.f32 %p4, %f13, 0f41102CB4;
        selp.f32 %f75, 0f3F800000, %f74, %p4;
        mov.b32 %r6, %f75;
        mov.b32 %r7, %f12;
        and.b32 %r8, %r7, -2147483648;
        or.b32 %r9, %r8, %r6;
        mov.b32 %f212, %r9;
$L__BB0_6:
        mul.ftz.f32 %f86, %f5, %f212;   // y1
// begin inline asm
{ cvt.rn.f16.f32 %rs22, %f86;}
// end inline asm
// ---- lane 2: low half of %f2 -----------------------------------------
        mov.b32 %r10, %f2;
        mov.b32 {%rs23, %rs7}, %r10;    // %rs23 = lane2 (lo), %rs7 = lane3 (hi)
// begin inline asm
{ cvt.f32.f16 %f87, %rs23;}
// end inline asm
        mul.ftz.f32 %f17, %f87, %f6;
        abs.ftz.f32 %f18, %f17;
        setp.ltu.ftz.f32 %p5, %f18, 0f3F19999A;
        @%p5 bra $L__BB0_8;
        bra.uni $L__BB0_7;
$L__BB0_8:                              // polynomial branch
        mul.ftz.f32 %f96, %f17, %f17;
        mov.f32 %f97, 0fBD563CAE;
        mov.f32 %f98, 0f3C80F082;
        fma.rn.ftz.f32 %f99, %f98, %f96, %f97;
        mov.f32 %f100, 0f3E085941;
        fma.rn.ftz.f32 %f101, %f99, %f96, %f100;
        mov.f32 %f102, 0fBEAAA9ED;
        fma.rn.ftz.f32 %f103, %f101, %f96, %f102;
        mov.f32 %f104, 0f00000000;
        fma.rn.ftz.f32 %f105, %f103, %f96, %f104;
        fma.rn.ftz.f32 %f213, %f105, %f17, %f17;
        bra.uni $L__BB0_9;
$L__BB0_7:                              // exponential branch
        mul.ftz.f32 %f88, %f18, 0f4038AA3B;
        ex2.approx.ftz.f32 %f89, %f88;
        add.ftz.f32 %f90, %f89, 0f3F800000;
        mov.f32 %f91, 0f3F800000;
        rcp.approx.ftz.f32 %f92, %f90;
        mov.f32 %f93, 0fC0000000;
        fma.rn.ftz.f32 %f94, %f92, %f93, %f91;
        setp.ge.ftz.f32 %p6, %f18, 0f41102CB4;
        selp.f32 %f95, 0f3F800000, %f94, %p6;
        mov.b32 %r11, %f95;
        mov.b32 %r12, %f17;
        and.b32 %r13, %r12, -2147483648;
        or.b32 %r14, %r13, %r11;
        mov.b32 %f213, %r14;
$L__BB0_9:
        mul.ftz.f32 %f106, %f5, %f213;  // y2
// begin inline asm
{ cvt.rn.f16.f32 %rs24, %f106;}
// end inline asm
// ---- lane 3: high half of %f2 ----------------------------------------
// begin inline asm
{ cvt.f32.f16 %f107, %rs7;}
// end inline asm
        mul.ftz.f32 %f22, %f107, %f6;
        abs.ftz.f32 %f23, %f22;
        setp.ltu.ftz.f32 %p7, %f23, 0f3F19999A;
        @%p7 bra $L__BB0_11;
        bra.uni $L__BB0_10;
$L__BB0_11:                             // polynomial branch
        mul.ftz.f32 %f116, %f22, %f22;
        mov.f32 %f117, 0fBD563CAE;
        mov.f32 %f118, 0f3C80F082;
        fma.rn.ftz.f32 %f119, %f118, %f116, %f117;
        mov.f32 %f120, 0f3E085941;
        fma.rn.ftz.f32 %f121, %f119, %f116, %f120;
        mov.f32 %f122, 0fBEAAA9ED;
        fma.rn.ftz.f32 %f123, %f121, %f116, %f122;
        mov.f32 %f124, 0f00000000;
        fma.rn.ftz.f32 %f125, %f123, %f116, %f124;
        fma.rn.ftz.f32 %f214, %f125, %f22, %f22;
        bra.uni $L__BB0_12;
$L__BB0_10:                             // exponential branch
        mul.ftz.f32 %f108, %f23, 0f4038AA3B;
        ex2.approx.ftz.f32 %f109, %f108;
        add.ftz.f32 %f110, %f109, 0f3F800000;
        mov.f32 %f111, 0f3F800000;
        rcp.approx.ftz.f32 %f112, %f110;
        mov.f32 %f113, 0fC0000000;
        fma.rn.ftz.f32 %f114, %f112, %f113, %f111;
        setp.ge.ftz.f32 %p8, %f23, 0f41102CB4;
        selp.f32 %f115, 0f3F800000, %f114, %p8;
        mov.b32 %r15, %f115;
        mov.b32 %r16, %f22;
        and.b32 %r17, %r16, -2147483648;
        or.b32 %r18, %r17, %r15;
        mov.b32 %f214, %r18;
$L__BB0_12:
        mul.ftz.f32 %f126, %f5, %f214;  // y3
// begin inline asm
{ cvt.rn.f16.f32 %rs26, %f126;}
// end inline asm
// ---- lane 4: low half of %f3 -----------------------------------------
        mov.b32 %r19, %f3;
        mov.b32 {%rs27, %rs12}, %r19;   // %rs27 = lane4 (lo), %rs12 = lane5 (hi)
// begin inline asm
{ cvt.f32.f16 %f127, %rs27;}
// end inline asm
        mul.ftz.f32 %f27, %f127, %f6;
        abs.ftz.f32 %f28, %f27;
        setp.ltu.ftz.f32 %p9, %f28, 0f3F19999A;
        @%p9 bra $L__BB0_14;
        bra.uni $L__BB0_13;
$L__BB0_14:                             // polynomial branch
        mul.ftz.f32 %f136, %f27, %f27;
        mov.f32 %f137, 0fBD563CAE;
        mov.f32 %f138, 0f3C80F082;
        fma.rn.ftz.f32 %f139, %f138, %f136, %f137;
        mov.f32 %f140, 0f3E085941;
        fma.rn.ftz.f32 %f141, %f139, %f136, %f140;
        mov.f32 %f142, 0fBEAAA9ED;
        fma.rn.ftz.f32 %f143, %f141, %f136, %f142;
        mov.f32 %f144, 0f00000000;
        fma.rn.ftz.f32 %f145, %f143, %f136, %f144;
        fma.rn.ftz.f32 %f215, %f145, %f27, %f27;
        bra.uni $L__BB0_15;
$L__BB0_13:                             // exponential branch
        mul.ftz.f32 %f128, %f28, 0f4038AA3B;
        ex2.approx.ftz.f32 %f129, %f128;
        add.ftz.f32 %f130, %f129, 0f3F800000;
        mov.f32 %f131, 0f3F800000;
        rcp.approx.ftz.f32 %f132, %f130;
        mov.f32 %f133, 0fC0000000;
        fma.rn.ftz.f32 %f134, %f132, %f133, %f131;
        setp.ge.ftz.f32 %p10, %f28, 0f41102CB4;
        selp.f32 %f135, 0f3F800000, %f134, %p10;
        mov.b32 %r20, %f135;
        mov.b32 %r21, %f27;
        and.b32 %r22, %r21, -2147483648;
        or.b32 %r23, %r22, %r20;
        mov.b32 %f215, %r23;
$L__BB0_15:
        mul.ftz.f32 %f146, %f5, %f215;  // y4
// begin inline asm
{ cvt.rn.f16.f32 %rs28, %f146;}
// end inline asm
// ---- lane 5: high half of %f3 ----------------------------------------
// begin inline asm
{ cvt.f32.f16 %f147, %rs12;}
// end inline asm
        mul.ftz.f32 %f32, %f147, %f6;
        abs.ftz.f32 %f33, %f32;
        setp.ltu.ftz.f32 %p11, %f33, 0f3F19999A;
        @%p11 bra $L__BB0_17;
        bra.uni $L__BB0_16;
$L__BB0_17:                             // polynomial branch
        mul.ftz.f32 %f156, %f32, %f32;
        mov.f32 %f157, 0fBD563CAE;
        mov.f32 %f158, 0f3C80F082;
        fma.rn.ftz.f32 %f159, %f158, %f156, %f157;
        mov.f32 %f160, 0f3E085941;
        fma.rn.ftz.f32 %f161, %f159, %f156, %f160;
        mov.f32 %f162, 0fBEAAA9ED;
        fma.rn.ftz.f32 %f163, %f161, %f156, %f162;
        mov.f32 %f164, 0f00000000;
        fma.rn.ftz.f32 %f165, %f163, %f156, %f164;
        fma.rn.ftz.f32 %f216, %f165, %f32, %f32;
        bra.uni $L__BB0_18;
$L__BB0_16:                             // exponential branch
        mul.ftz.f32 %f148, %f33, 0f4038AA3B;
        ex2.approx.ftz.f32 %f149, %f148;
        add.ftz.f32 %f150, %f149, 0f3F800000;
        mov.f32 %f151, 0f3F800000;
        rcp.approx.ftz.f32 %f152, %f150;
        mov.f32 %f153, 0fC0000000;
        fma.rn.ftz.f32 %f154, %f152, %f153, %f151;
        setp.ge.ftz.f32 %p12, %f33, 0f41102CB4;
        selp.f32 %f155, 0f3F800000, %f154, %p12;
        mov.b32 %r24, %f155;
        mov.b32 %r25, %f32;
        and.b32 %r26, %r25, -2147483648;
        or.b32 %r27, %r26, %r24;
        mov.b32 %f216, %r27;
$L__BB0_18:
        mul.ftz.f32 %f166, %f5, %f216;  // y5
// begin inline asm
{ cvt.rn.f16.f32 %rs30, %f166;}
// end inline asm
// ---- lane 6: low half of %f4 -----------------------------------------
        mov.b32 %r28, %f4;
        mov.b32 {%rs31, %rs17}, %r28;   // %rs31 = lane6 (lo), %rs17 = lane7 (hi)
// begin inline asm
{ cvt.f32.f16 %f167, %rs31;}
// end inline asm
        mul.ftz.f32 %f37, %f167, %f6;
        abs.ftz.f32 %f38, %f37;
        setp.ltu.ftz.f32 %p13, %f38, 0f3F19999A;
        @%p13 bra $L__BB0_20;
        bra.uni $L__BB0_19;
$L__BB0_20:                             // polynomial branch
        mul.ftz.f32 %f176, %f37, %f37;
        mov.f32 %f177, 0fBD563CAE;
        mov.f32 %f178, 0f3C80F082;
        fma.rn.ftz.f32 %f179, %f178, %f176, %f177;
        mov.f32 %f180, 0f3E085941;
        fma.rn.ftz.f32 %f181, %f179, %f176, %f180;
        mov.f32 %f182, 0fBEAAA9ED;
        fma.rn.ftz.f32 %f183, %f181, %f176, %f182;
        mov.f32 %f184, 0f00000000;
        fma.rn.ftz.f32 %f185, %f183, %f176, %f184;
        fma.rn.ftz.f32 %f217, %f185, %f37, %f37;
        bra.uni $L__BB0_21;
$L__BB0_19:                             // exponential branch
        mul.ftz.f32 %f168, %f38, 0f4038AA3B;
        ex2.approx.ftz.f32 %f169, %f168;
        add.ftz.f32 %f170, %f169, 0f3F800000;
        mov.f32 %f171, 0f3F800000;
        rcp.approx.ftz.f32 %f172, %f170;
        mov.f32 %f173, 0fC0000000;
        fma.rn.ftz.f32 %f174, %f172, %f173, %f171;
        setp.ge.ftz.f32 %p14, %f38, 0f41102CB4;
        selp.f32 %f175, 0f3F800000, %f174, %p14;
        mov.b32 %r29, %f175;
        mov.b32 %r30, %f37;
        and.b32 %r31, %r30, -2147483648;
        or.b32 %r32, %r31, %r29;
        mov.b32 %f217, %r32;
$L__BB0_21:
        mul.ftz.f32 %f186, %f5, %f217;  // y6
// begin inline asm
{ cvt.rn.f16.f32 %rs32, %f186;}
// end inline asm
// ---- lane 7: high half of %f4 ----------------------------------------
// begin inline asm
{ cvt.f32.f16 %f187, %rs17;}
// end inline asm
        mul.ftz.f32 %f42, %f187, %f6;
        abs.ftz.f32 %f43, %f42;
        setp.ltu.ftz.f32 %p15, %f43, 0f3F19999A;
        @%p15 bra $L__BB0_23;
        bra.uni $L__BB0_22;
$L__BB0_23:                             // polynomial branch
        mul.ftz.f32 %f196, %f42, %f42;
        mov.f32 %f197, 0fBD563CAE;
        mov.f32 %f198, 0f3C80F082;
        fma.rn.ftz.f32 %f199, %f198, %f196, %f197;
        mov.f32 %f200, 0f3E085941;
        fma.rn.ftz.f32 %f201, %f199, %f196, %f200;
        mov.f32 %f202, 0fBEAAA9ED;
        fma.rn.ftz.f32 %f203, %f201, %f196, %f202;
        mov.f32 %f204, 0f00000000;
        fma.rn.ftz.f32 %f205, %f203, %f196, %f204;
        fma.rn.ftz.f32 %f218, %f205, %f42, %f42;
        bra.uni $L__BB0_24;
$L__BB0_22:                             // exponential branch
        mul.ftz.f32 %f188, %f43, 0f4038AA3B;
        ex2.approx.ftz.f32 %f189, %f188;
        add.ftz.f32 %f190, %f189, 0f3F800000;
        mov.f32 %f191, 0f3F800000;
        rcp.approx.ftz.f32 %f192, %f190;
        mov.f32 %f193, 0fC0000000;
        fma.rn.ftz.f32 %f194, %f192, %f193, %f191;
        setp.ge.ftz.f32 %p16, %f43, 0f41102CB4;
        selp.f32 %f195, 0f3F800000, %f194, %p16;
        mov.b32 %r33, %f195;
        mov.b32 %r34, %f42;
        and.b32 %r35, %r34, -2147483648;
        or.b32 %r36, %r35, %r33;
        mov.b32 %f218, %r36;
$L__BB0_24:
        mul.ftz.f32 %f206, %f5, %f218;  // y7
// begin inline asm
{ cvt.rn.f16.f32 %rs34, %f206;}
// end inline asm
// ---- repack the eight f16 results into four b32 return words ---------
        mov.b32 %r37, {%rs32, %rs34};   // lanes 6,7
        mov.b32 %r38, {%rs20, %rs22};   // lanes 0,1
        mov.b32 %r39, {%rs24, %rs26};   // lanes 2,3
        mov.b32 %r40, {%rs28, %rs30};   // lanes 4,5
        mov.b32 %f207, %r37;
        mov.b32 %f208, %r40;
        mov.b32 %f209, %r39;
        mov.b32 %f210, %r38;
        st.param.f32 [func_retval0+0], %f210;   // lanes 0,1
        st.param.f32 [func_retval0+4], %f209;   // lanes 2,3
        st.param.f32 [func_retval0+8], %f208;   // lanes 4,5
        st.param.f32 [func_retval0+12], %f207;  // lanes 6,7
        ret;
}