5007142 Add ntohll and htonll to sys/byteorder.h
6717509 Need to use bswap/bswapq for byte swap of 64-bit integer on x32/x64
PSARC 2008/474

          --- old/usr/src/uts/intel/ia32/ml/i86_subr.s
          +++ new/usr/src/uts/intel/ia32/ml/i86_subr.s
[ 102 lines elided ]
 103  103  
 104  104          ENTRY(no_fault)
 105  105          movq    %gs:CPU_THREAD, %rsi
 106  106          xorl    %eax, %eax
 107  107          movq    %rax, T_ONFAULT(%rsi)           /* turn off onfault */
 108  108          movq    %rax, T_LOFAULT(%rsi)           /* turn off lofault */
 109  109          ret
 110  110          SET_SIZE(no_fault)
 111  111  
 112  112  #elif defined(__i386)
 113      -                        
      113 +
 114  114          ENTRY(on_fault)
 115  115          movl    %gs:CPU_THREAD, %edx
 116  116          movl    4(%esp), %eax                   /* jumpbuf address */
 117  117          leal    catch_fault, %ecx
 118  118          movl    %eax, T_ONFAULT(%edx)           /* jumpbuf in t_onfault */
 119  119          movl    %ecx, T_LOFAULT(%edx)           /* catch_fault in t_lofault */
 120  120          jmp     setjmp                          /* let setjmp do the rest */
 121  121  
 122  122  catch_fault:
 123  123          movl    %gs:CPU_THREAD, %edx
[ 774 lines elided ]
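As context for the hunk above: on_fault() installs the caller's jump buffer in t_onfault and catch_fault in t_lofault before tail-calling setjmp, so a later fault appears as a nonzero return from on_fault(); no_fault() clears both handlers. A minimal C sketch of the calling idiom (assumed usage, not part of this diff; read_user_word() is a hypothetical caller):

        /*
         * Hypothetical illustration of the on_fault()/no_fault() idiom.
         */
        static int
        read_user_word(const uint32_t *addr, uint32_t *valp)
        {
                label_t ljb;

                if (on_fault(&ljb)) {
                        /* a fault longjmp'ed back through catch_fault */
                        no_fault();
                        return (EFAULT);
                }
                *valp = *addr;          /* the access that may fault */
                no_fault();             /* disarm the handler */
                return (0);
        }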
 898  898  /*ARGSUSED*/
 899  899  void
 900  900  _insque(caddr_t entryp, caddr_t predp)
 901  901  {}
 902  902  
 903  903  #else   /* __lint */
 904  904  
 905  905  #if defined(__amd64)
 906  906  
 907  907          ENTRY(_insque)
 908      -        movq    (%rsi), %rax            /* predp->forw                  */
      908 +        movq    (%rsi), %rax            /* predp->forw                  */
 909  909          movq    %rsi, CPTRSIZE(%rdi)    /* entryp->back = predp         */
 910  910          movq    %rax, (%rdi)            /* entryp->forw = predp->forw   */
 911  911          movq    %rdi, (%rsi)            /* predp->forw = entryp         */
 912  912          movq    %rdi, CPTRSIZE(%rax)    /* predp->forw->back = entryp   */
 913  913          ret
 914  914          SET_SIZE(_insque)
 915  915  
 916  916  #elif defined(__i386)
 917  917  
 918  918          ENTRY(_insque)
[ 154 lines elided ]
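The register comments in the amd64 _insque() above spell out the algorithm: insert entryp into a doubly linked queue immediately after predp. A C sketch of the same four stores, assuming the traditional element layout with the forward pointer at offset 0 and the back pointer at offset CPTRSIZE (the q_elem names are illustrative only):

        struct q_elem {
                struct q_elem   *q_forw;        /* offset 0 */
                struct q_elem   *q_back;        /* offset CPTRSIZE */
        };

        static void
        insque_sketch(struct q_elem *entryp, struct q_elem *predp)
        {
                entryp->q_forw = predp->q_forw; /* entryp->forw = predp->forw */
                entryp->q_back = predp;         /* entryp->back = predp */
                predp->q_forw->q_back = entryp; /* predp->forw->back = entryp */
                predp->q_forw = entryp;         /* predp->forw = entryp */
        }

Note the ordering: the old predp->forw is consumed (first and third stores) before the final store overwrites it, just as the assembly stashes it in %rax up front.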
1073 1073  
1074 1074  #ifdef DEBUG
1075 1075          .text
1076 1076  .str_panic_msg:
1077 1077          .string "strlen: argument below kernelbase"
1078 1078  #endif /* DEBUG */
1079 1079  
1080 1080  #endif  /* __lint */
1081 1081  
1082 1082          /*
1083      -         * Berkley 4.3 introduced symbolically named interrupt levels
     1083 +         * Berkeley 4.3 introduced symbolically named interrupt levels
1084 1084           * as a way to deal with priority in a machine-independent fashion.
1085 1085           * Numbered priorities are machine specific, and should be
1086 1086           * discouraged where possible.
1087 1087           *
1088 1088           * Note, for the machine specific priorities there are
1089 1089           * examples listed for devices that use a particular priority.
1090 1090           * It should not be construed that all devices of that
1091 1091           * type should be at that priority.  It is simply where
1092 1092           * the current devices fit into the priority scheme based
1093 1093           * upon time criticality.
[ 92 lines elided ]
1186 1186          SET_SIZE(spl6)
1187 1187          SET_SIZE(splhigh)
1188 1188          SET_SIZE(splhi)
1189 1189  
1190 1190          /* allow all interrupts */
1191 1191          ENTRY(spl0)
1192 1192          SETPRI(0)
1193 1193          SET_SIZE(spl0)
1194 1194  
1195 1195  
1196      -        /* splx implentation */
     1196 +        /* splx implementation */
1197 1197          ENTRY(splx)
1198 1198          jmp     do_splx         /* redirect to common splx code */
1199 1199          SET_SIZE(splx)
1200 1200  
1201 1201  #endif  /* __lint */
1202 1202  
1203 1203  #if defined(__i386)
1204 1204  
1205 1205  /*
1206 1206   * Read and write the %gs register
[ 668 lines elided ]
1875 1875  #else   /* __lint */
1876 1876  
1877 1877  #if defined(__amd64)
1878 1878  
1879 1879          ENTRY(scanc)
1880 1880                                          /* rdi == size */
1881 1881                                          /* rsi == cp */
1882 1882                                          /* rdx == table */
1883 1883                                          /* rcx == mask */
1884 1884          addq    %rsi, %rdi              /* end = &cp[size] */
1885      -.scanloop:      
     1885 +.scanloop:
1886 1886          cmpq    %rdi, %rsi              /* while (cp < end */
1887 1887          jnb     .scandone
1888 1888          movzbq  (%rsi), %r8             /* %r8 = *cp */
1889 1889          incq    %rsi                    /* cp++ */
1890 1890          testb   %cl, (%r8, %rdx)
1891 1891          jz      .scanloop               /*  && (table[*cp] & mask) == 0) */
1892 1892          decq    %rsi                    /* (fix post-increment) */
1893 1893  .scandone:
1894 1894          movl    %edi, %eax
1895 1895          subl    %esi, %eax              /* return (end - cp) */
1896 1896          ret
1897 1897          SET_SIZE(scanc)
1898 1898  
1899 1899  #elif defined(__i386)
1900      -        
     1900 +
1901 1901          ENTRY(scanc)
1902 1902          pushl   %edi
1903 1903          pushl   %esi
1904 1904          movb    24(%esp), %cl           /* mask = %cl */
1905 1905          movl    16(%esp), %esi          /* cp = %esi */
1906 1906          movl    20(%esp), %edx          /* table = %edx */
1907 1907          movl    %esi, %edi
1908 1908          addl    12(%esp), %edi          /* end = &cp[size]; */
1909 1909  .scanloop:
1910 1910          cmpl    %edi, %esi              /* while (cp < end */
[ 5 lines elided ]
1916 1916          jz      .scanloop               /*   && (table[*cp] & mask) == 0) */
1917 1917          dec     %esi                    /* post-incremented */
1918 1918  .scandone:
1919 1919          movl    %edi, %eax
1920 1920          subl    %esi, %eax              /* return (end - cp) */
1921 1921          popl    %esi
1922 1922          popl    %edi
1923 1923          ret
1924 1924          SET_SIZE(scanc)
1925 1925  
1926      -#endif  /* __i386 */    
     1926 +#endif  /* __i386 */
1927 1927  #endif  /* __lint */
1928 1928  
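The scanc() loop above transcribes the classic BSD routine: walk a byte string until a byte whose table entry has one of the mask bits set, then return how many bytes remain (counting the matching byte). A C sketch of the same logic, following the register comments (names are illustrative):

        static int
        scanc_sketch(int size, const unsigned char *cp,
            const unsigned char *table, unsigned char mask)
        {
                const unsigned char *end = cp + size;   /* end = &cp[size] */

                while (cp < end && (table[*cp] & mask) == 0)
                        cp++;
                return ((int)(end - cp));       /* 0 if nothing matched */
        }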
1929 1929  /*
1930 1930   * Replacement functions for ones that are normally inlined.
1931 1931   * In addition to the copy in i86.il, they are defined here just in case.
1932 1932   */
1933 1933  
1934 1934  #if defined(__lint)
1935 1935  
1936 1936  ulong_t
↓ open down ↓ 83 lines elided ↑ open up ↑
2020 2020  #elif defined(__i386)
2021 2021  
2022 2022          ENTRY(curcpup)
2023 2023          movl    %gs:CPU_SELF, %eax
2024 2024          ret
2025 2025          SET_SIZE(curcpup)
2026 2026  
2027 2027  #endif  /* __i386 */
2028 2028  #endif  /* __lint */
2029 2029  
     2030 +/* htonll(), ntohll(), htonl(), ntohl(), htons(), ntohs()
     2031 + * These functions reverse the byte order of the input parameter and return
     2032 + * the result, converting from host byte order (little endian) to
     2033 + * network byte order (big endian), or vice versa.
     2034 + */
     2035 +
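For reference, the effect of the bswap/bswapq instructions used below can be written portably in C. A minimal sketch of the 64-bit case, assuming a little-endian host (on a big-endian host the hton*/ntoh* functions would be identity operations); the helper name is illustrative:

        static uint64_t
        bswap64_sketch(uint64_t v)
        {
                return (((v & 0x00000000000000ffULL) << 56) |
                    ((v & 0x000000000000ff00ULL) << 40) |
                    ((v & 0x0000000000ff0000ULL) << 24) |
                    ((v & 0x00000000ff000000ULL) <<  8) |
                    ((v & 0x000000ff00000000ULL) >>  8) |
                    ((v & 0x0000ff0000000000ULL) >> 24) |
                    ((v & 0x00ff000000000000ULL) >> 40) |
                    ((v & 0xff00000000000000ULL) >> 56));
        }

For example, bswap64_sketch(0x0102030405060708ULL) yields 0x0807060504030201ULL, which is exactly what the single bswapq instruction computes on amd64.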
2030 2036  #if defined(__lint)
2031 2037  
2032      -/* ARGSUSED */
     2038 +uint64_t
     2039 +htonll(uint64_t i)
     2040 +{ return (i); }
     2041 +
     2042 +uint64_t
     2043 +ntohll(uint64_t i)
     2044 +{ return (i); }
     2045 +
2033 2046  uint32_t
2034 2047  htonl(uint32_t i)
2035      -{ return (0); }
     2048 +{ return (i); }
2036 2049  
2037      -/* ARGSUSED */
2038 2050  uint32_t
2039 2051  ntohl(uint32_t i)
2040      -{ return (0); }
     2052 +{ return (i); }
2041 2053  
     2054 +uint16_t
     2055 +htons(uint16_t i)
     2056 +{ return (i); }
     2057 +
     2058 +uint16_t
     2059 +ntohs(uint16_t i)
     2060 +{ return (i); }
     2061 +
2042 2062  #else   /* __lint */
2043 2063  
2044 2064  #if defined(__amd64)
2045 2065  
     2066 +        ENTRY(htonll)
     2067 +        ALTENTRY(ntohll)
     2068 +        movq    %rdi, %rax
     2069 +        bswapq  %rax
     2070 +        ret
     2071 +        SET_SIZE(ntohll)
     2072 +        SET_SIZE(htonll)
     2073 +
2046 2074          /* XX64 there must be shorter sequences for this */
2047 2075          ENTRY(htonl)
2048 2076          ALTENTRY(ntohl)
2049 2077          movl    %edi, %eax
2050 2078          bswap   %eax
2051 2079          ret
2052 2080          SET_SIZE(ntohl)
2053 2081          SET_SIZE(htonl)
2054 2082  
2055      -#elif defined(__i386)
2056      -
2057      -        ENTRY(htonl)
2058      -        ALTENTRY(ntohl)
2059      -        movl    4(%esp), %eax
2060      -        bswap   %eax
2061      -        ret
2062      -        SET_SIZE(ntohl)
2063      -        SET_SIZE(htonl)
2064      -
2065      -#endif  /* __i386 */
2066      -#endif  /* __lint */
2067      -
2068      -#if defined(__lint)
2069      -
2070      -/* ARGSUSED */
2071      -uint16_t
2072      -htons(uint16_t i)
2073      -{ return (0); }
2074      -
2075      -/* ARGSUSED */
2076      -uint16_t
2077      -ntohs(uint16_t i)
2078      -{ return (0); }
2079      -
2080      -
2081      -#else   /* __lint */
2082      -
2083      -#if defined(__amd64)
2084      -
2085 2083          /* XX64 there must be better sequences for this */
2086 2084          ENTRY(htons)
2087 2085          ALTENTRY(ntohs)
2088 2086          movl    %edi, %eax
2089 2087          bswap   %eax
2090 2088          shrl    $16, %eax
2091 2089          ret
2092      -        SET_SIZE(ntohs) 
     2090 +        SET_SIZE(ntohs)
2093 2091          SET_SIZE(htons)
2094 2092  
2095 2093  #elif defined(__i386)
2096 2094  
     2095 +        ENTRY(htonll)
     2096 +        ALTENTRY(ntohll)
     2097 +        movl    4(%esp), %edx
     2098 +        movl    8(%esp), %eax
     2099 +        bswap   %edx
     2100 +        bswap   %eax
     2101 +        ret
     2102 +        SET_SIZE(ntohll)
     2103 +        SET_SIZE(htonll)
     2104 +
     2105 +        ENTRY(htonl)
     2106 +        ALTENTRY(ntohl)
     2107 +        movl    4(%esp), %eax
     2108 +        bswap   %eax
     2109 +        ret
     2110 +        SET_SIZE(ntohl)
     2111 +        SET_SIZE(htonl)
     2112 +
2097 2113          ENTRY(htons)
2098 2114          ALTENTRY(ntohs)
2099 2115          movl    4(%esp), %eax
2100 2116          bswap   %eax
2101 2117          shrl    $16, %eax
2102 2118          ret
2103 2119          SET_SIZE(ntohs)
2104 2120          SET_SIZE(htons)
2105 2121  
2106 2122  #endif  /* __i386 */
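Two details of the code above are worth noting. First, htons()/ntohs() reuse the full 32-bit bswap and then shift right by 16, which leaves the swapped halfword in the low 16 bits of %eax. Second, since i386 returns 64-bit values in the %edx:%eax pair, htonll() swaps each 32-bit half and exchanges them: the low input word at 4(%esp) becomes the high result word in %edx, and vice versa. A C sketch of that decomposition (helper names are illustrative, not from the diff):

        static uint32_t
        bswap32_sketch(uint32_t v)
        {
                return ((v << 24) | ((v & 0xff00) << 8) |
                    ((v >> 8) & 0xff00) | (v >> 24));
        }

        static uint64_t
        htonll_sketch(uint64_t v)
        {
                uint32_t lo = (uint32_t)v;              /* 4(%esp) */
                uint32_t hi = (uint32_t)(v >> 32);      /* 8(%esp) */

                /* swapped low half becomes the high result word (%edx) */
                return (((uint64_t)bswap32_sketch(lo) << 32) |
                    bswap32_sketch(hi));
        }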
[ 2092 lines elided ]