1063 testl $3, %eax /* if %eax not word aligned */
1064 jnz .not_word_aligned /* goto .not_word_aligned */
1065 jmp .word_aligned /* goto .word_aligned */
1066 .align 4
1067 .null_found:
1068 subl 4(%esp), %eax /* %eax -= string address */
1069 ret
1070 SET_SIZE(strlen)
1071
1072 #endif /* __i386 */
1073
1074 #ifdef DEBUG
1075 	.text
1076 .str_panic_msg:				/* panic text for the DEBUG-only strlen() kernelbase argument check */
1077 	.string "strlen: argument below kernelbase"
1078 #endif /* DEBUG */
1079
1080 #endif /* __lint */
1081
1082 /*
1083  * Berkeley 4.3 introduced symbolically named interrupt levels
1084  * as a way to deal with priority in a machine independent fashion.
1085 * Numbered priorities are machine specific, and should be
1086 * discouraged where possible.
1087 *
1088 * Note, for the machine specific priorities there are
1089 * examples listed for devices that use a particular priority.
1090 * It should not be construed that all devices of that
1091  * type should be at that priority. It is currently where
1092 * the current devices fit into the priority scheme based
1093 * upon time criticalness.
1094 *
1095 * The underlying assumption of these assignments is that
1096 * IPL 10 is the highest level from which a device
1097 * routine can call wakeup. Devices that interrupt from higher
1098 * levels are restricted in what they can do. If they need
1099  * kernel services they should schedule a routine at a lower
1100 * level (via software interrupt) to do the required
1101 * processing.
1102 *
1103 * Examples of this higher usage:
1176 SET_SIZE(splzs)
1177
1178 	ENTRY(splhi)
1179 	ALTENTRY(splhigh)			/* alternate names for the same entry point */
1180 	ALTENTRY(spl6)
1181 	ALTENTRY(i_ddi_splhigh)
1182 
1183 	RAISE(DISP_LEVEL)			/* raise IPL to dispatcher level (presumably returns the previous level -- confirm in RAISE macro) */
1184 
1185 	SET_SIZE(i_ddi_splhigh)
1186 	SET_SIZE(spl6)
1187 	SET_SIZE(splhigh)
1188 	SET_SIZE(splhi)
1189
1190 /* allow all interrupts */
1191 	ENTRY(spl0)
1192 	SETPRI(0)				/* set priority level to 0 (presumably returns previous level -- confirm in SETPRI macro) */
1193 	SET_SIZE(spl0)
1194
1195
1196 /* splx implementation */
1197 	ENTRY(splx)
1198 	jmp	do_splx		/* redirect to common splx code (tail call; do_splx returns to our caller) */
1199 	SET_SIZE(splx)
1200
1201 #endif /* __lint */
1202
1203 #if defined(__i386)
1204
1205 /*
1206 * Read and write the %gs register
1207 */
1208
1209 #if defined(__lint)
1210
1211 /*ARGSUSED*/
1212 uint16_t
1213 getgs(void)		/* lint stub: returns current %gs selector; real implementation is in assembly */
1214 { return (0); }
1215
1216 /*ARGSUSED*/
2010
2011 #else /* __lint */
2012
2013 #if defined(__amd64)
2014
2015 	ENTRY(curcpup)
2016 	movq	%gs:CPU_SELF, %rax	/* return this CPU's cpu_t pointer (presumably cpu->cpu_self; %gs bases the per-CPU area) */
2017 	ret
2018 	SET_SIZE(curcpup)
2019
2020 #elif defined(__i386)
2021
2022 	ENTRY(curcpup)
2023 	movl	%gs:CPU_SELF, %eax	/* return this CPU's cpu_t pointer (presumably cpu->cpu_self; %gs bases the per-CPU area) */
2024 	ret
2025 	SET_SIZE(curcpup)
2026
2027 #endif /* __i386 */
2028 #endif /* __lint */
2029
2030 #if defined(__lint)
2031
2032 /* ARGSUSED */
2033 uint32_t
2034 htonl(uint32_t i)	/* lint stub: host-to-network 32-bit byte-order conversion */
2035 { return (0); }
2036 
2037 /* ARGSUSED */
2038 uint32_t
2039 ntohl(uint32_t i)	/* lint stub: network-to-host 32-bit byte-order conversion */
2040 { return (0); }
2041
2042 #else /* __lint */
2043
2044 #if defined(__amd64)
2045
2046 /* XX64 there must be shorter sequences for this */
2047 	ENTRY(htonl)
2048 	ALTENTRY(ntohl)			/* same operation in both directions: a byte swap is its own inverse */
2049 	movl	%edi, %eax		/* copy 32-bit argument into return register */
2050 	bswap	%eax			/* reverse byte order: host <-> network */
2051 	ret
2052 	SET_SIZE(ntohl)
2053 	SET_SIZE(htonl)
2054
2055 #elif defined(__i386)
2056
2057 	ENTRY(htonl)
2058 	ALTENTRY(ntohl)			/* same operation in both directions: a byte swap is its own inverse */
2059 	movl	4(%esp), %eax		/* load 32-bit argument from the stack */
2060 	bswap	%eax			/* reverse byte order: host <-> network */
2061 	ret
2062 	SET_SIZE(ntohl)
2063 	SET_SIZE(htonl)
2064
2065 #endif /* __i386 */
2066 #endif /* __lint */
2067
2068 #if defined(__lint)
2069
2070 /* ARGSUSED */
2071 uint16_t
2072 htons(uint16_t i)	/* lint stub: host-to-network 16-bit byte-order conversion */
2073 { return (0); }
2074 
2075 /* ARGSUSED */
2076 uint16_t
2077 ntohs(uint16_t i)	/* lint stub: network-to-host 16-bit byte-order conversion */
2078 { return (0); }
2079
2080
2081 #else /* __lint */
2082
2083 #if defined(__amd64)
2084
2085 /* XX64 there must be better sequences for this */
2086 	ENTRY(htons)
2087 	ALTENTRY(ntohs)			/* same operation in both directions: a byte swap is its own inverse */
2088 	movl	%edi, %eax		/* copy argument (16 significant bits) */
2089 	bswap	%eax			/* reverse all 4 bytes; wanted pair ends up in bits 31:16 */
2090 	shrl	$16, %eax		/* shift swapped 16-bit value into the low half, zeroing the top */
2091 	ret
2092 	SET_SIZE(ntohs)
2093 	SET_SIZE(htons)
2094
2095 #elif defined(__i386)
2096
2097 	ENTRY(htons)
2098 	ALTENTRY(ntohs)			/* same operation in both directions: a byte swap is its own inverse */
2099 	movl	4(%esp), %eax		/* load argument (16 significant bits) from the stack */
2100 	bswap	%eax			/* reverse all 4 bytes; wanted pair ends up in bits 31:16 */
2101 	shrl	$16, %eax		/* shift swapped 16-bit value into the low half, zeroing the top */
2102 	ret
2103 	SET_SIZE(ntohs)
2104 	SET_SIZE(htons)
2105
2106 #endif /* __i386 */
2107 #endif /* __lint */
2108
2109
2110 #if defined(__lint)
2111
2112 /* ARGSUSED */
2113 void
2114 intr_restore(ulong_t i)	/* lint stub: restores a previously saved interrupt state */
2115 { return; }
2116
|
1063 testl $3, %eax /* if %eax not word aligned */
1064 jnz .not_word_aligned /* goto .not_word_aligned */
1065 jmp .word_aligned /* goto .word_aligned */
1066 .align 4
1067 .null_found:
1068 subl 4(%esp), %eax /* %eax -= string address */
1069 ret
1070 SET_SIZE(strlen)
1071
1072 #endif /* __i386 */
1073
1074 #ifdef DEBUG
1075 	.text
1076 .str_panic_msg:				/* panic text for the DEBUG-only strlen() kernelbase argument check */
1077 	.string "strlen: argument below kernelbase"
1078 #endif /* DEBUG */
1079
1080 #endif /* __lint */
1081
1082 /*
1083 * Berkeley 4.3 introduced symbolically named interrupt levels
1084  * as a way to deal with priority in a machine independent fashion.
1085 * Numbered priorities are machine specific, and should be
1086 * discouraged where possible.
1087 *
1088 * Note, for the machine specific priorities there are
1089 * examples listed for devices that use a particular priority.
1090 * It should not be construed that all devices of that
1091  * type should be at that priority. It is currently where
1092 * the current devices fit into the priority scheme based
1093 * upon time criticalness.
1094 *
1095 * The underlying assumption of these assignments is that
1096 * IPL 10 is the highest level from which a device
1097 * routine can call wakeup. Devices that interrupt from higher
1098 * levels are restricted in what they can do. If they need
1099  * kernel services they should schedule a routine at a lower
1100 * level (via software interrupt) to do the required
1101 * processing.
1102 *
1103 * Examples of this higher usage:
1176 SET_SIZE(splzs)
1177
1178 	ENTRY(splhi)
1179 	ALTENTRY(splhigh)			/* alternate names for the same entry point */
1180 	ALTENTRY(spl6)
1181 	ALTENTRY(i_ddi_splhigh)
1182 
1183 	RAISE(DISP_LEVEL)			/* raise IPL to dispatcher level (presumably returns the previous level -- confirm in RAISE macro) */
1184 
1185 	SET_SIZE(i_ddi_splhigh)
1186 	SET_SIZE(spl6)
1187 	SET_SIZE(splhigh)
1188 	SET_SIZE(splhi)
1189
1190 /* allow all interrupts */
1191 	ENTRY(spl0)
1192 	SETPRI(0)				/* set priority level to 0 (presumably returns previous level -- confirm in SETPRI macro) */
1193 	SET_SIZE(spl0)
1194
1195
1196 /* splx implementation */
1197 	ENTRY(splx)
1198 	jmp	do_splx		/* redirect to common splx code (tail call; do_splx returns to our caller) */
1199 	SET_SIZE(splx)
1200
1201 #endif /* __lint */
1202
1203 #if defined(__i386)
1204
1205 /*
1206 * Read and write the %gs register
1207 */
1208
1209 #if defined(__lint)
1210
1211 /*ARGSUSED*/
1212 uint16_t
1213 getgs(void)		/* lint stub: returns current %gs selector; real implementation is in assembly */
1214 { return (0); }
1215
1216 /*ARGSUSED*/
2010
2011 #else /* __lint */
2012
2013 #if defined(__amd64)
2014
2015 	ENTRY(curcpup)
2016 	movq	%gs:CPU_SELF, %rax	/* return this CPU's cpu_t pointer (presumably cpu->cpu_self; %gs bases the per-CPU area) */
2017 	ret
2018 	SET_SIZE(curcpup)
2019
2020 #elif defined(__i386)
2021
2022 	ENTRY(curcpup)
2023 	movl	%gs:CPU_SELF, %eax	/* return this CPU's cpu_t pointer (presumably cpu->cpu_self; %gs bases the per-CPU area) */
2024 	ret
2025 	SET_SIZE(curcpup)
2026
2027 #endif /* __i386 */
2028 #endif /* __lint */
2029
2030 /* htonll(), ntohll(), htonl(), ntohl(), htons(), ntohs()
2031  * These functions reverse the byte order of the input parameter and return
2032  * the result. This is to convert the byte order from host byte order
2033  * (little endian) to network byte order (big endian), or vice versa.
2034  */
2035
2036 #if defined(__lint)
2037
2038 uint64_t
2039 htonll(uint64_t i)	/* lint stub: host-to-network 64-bit byte-order conversion */
2040 { return (i); }
2041 
2042 uint64_t
2043 ntohll(uint64_t i)	/* lint stub: network-to-host 64-bit byte-order conversion */
2044 { return (i); }
2045 
2046 uint32_t
2047 htonl(uint32_t i)	/* lint stub: host-to-network 32-bit byte-order conversion */
2048 { return (i); }
2049 
2050 uint32_t
2051 ntohl(uint32_t i)	/* lint stub: network-to-host 32-bit byte-order conversion */
2052 { return (i); }
2053 
2054 uint16_t
2055 htons(uint16_t i)	/* lint stub: host-to-network 16-bit byte-order conversion */
2056 { return (i); }
2057 
2058 uint16_t
2059 ntohs(uint16_t i)	/* lint stub: network-to-host 16-bit byte-order conversion */
2060 { return (i); }
2061
2062 #else /* __lint */
2063
2064 #if defined(__amd64)
2065
2066 	ENTRY(htonll)
2067 	ALTENTRY(ntohll)		/* same operation in both directions: a byte swap is its own inverse */
2068 	movq	%rdi, %rax		/* copy 64-bit argument into return register */
2069 	bswapq	%rax			/* reverse byte order: host <-> network */
2070 	ret
2071 	SET_SIZE(ntohll)
2072 	SET_SIZE(htonll)
2073
2074 /* XX64 there must be shorter sequences for this */
2075 	ENTRY(htonl)
2076 	ALTENTRY(ntohl)			/* same operation in both directions: a byte swap is its own inverse */
2077 	movl	%edi, %eax		/* copy 32-bit argument into return register */
2078 	bswap	%eax			/* reverse byte order: host <-> network */
2079 	ret
2080 	SET_SIZE(ntohl)
2081 	SET_SIZE(htonl)
2082
2083 /* XX64 there must be better sequences for this */
2084 	ENTRY(htons)
2085 	ALTENTRY(ntohs)			/* same operation in both directions: a byte swap is its own inverse */
2086 	movl	%edi, %eax		/* copy argument (16 significant bits) */
2087 	bswap	%eax			/* reverse all 4 bytes; wanted pair ends up in bits 31:16 */
2088 	shrl	$16, %eax		/* shift swapped 16-bit value into the low half, zeroing the top */
2089 	ret
2090 	SET_SIZE(ntohs)
2091 	SET_SIZE(htons)
2092
2093 #elif defined(__i386)
2094
2095 	ENTRY(htonll)
2096 	ALTENTRY(ntohll)		/* same operation in both directions: a byte swap is its own inverse */
2097 	movl	4(%esp), %edx		/* low 32 bits of argument; after swap they form the high half */
2098 	movl	8(%esp), %eax		/* high 32 bits of argument; after swap they form the low half */
2099 	bswap	%edx			/* reverse bytes of each half; 64-bit result returned in %edx:%eax */
2100 	bswap	%eax
2101 	ret
2102 	SET_SIZE(ntohll)
2103 	SET_SIZE(htonll)
2104
2105 	ENTRY(htonl)
2106 	ALTENTRY(ntohl)			/* same operation in both directions: a byte swap is its own inverse */
2107 	movl	4(%esp), %eax		/* load 32-bit argument from the stack */
2108 	bswap	%eax			/* reverse byte order: host <-> network */
2109 	ret
2110 	SET_SIZE(ntohl)
2111 	SET_SIZE(htonl)
2112
2113 	ENTRY(htons)
2114 	ALTENTRY(ntohs)			/* same operation in both directions: a byte swap is its own inverse */
2115 	movl	4(%esp), %eax		/* load argument (16 significant bits) from the stack */
2116 	bswap	%eax			/* reverse all 4 bytes; wanted pair ends up in bits 31:16 */
2117 	shrl	$16, %eax		/* shift swapped 16-bit value into the low half, zeroing the top */
2118 	ret
2119 	SET_SIZE(ntohs)
2120 	SET_SIZE(htons)
2121
2122 #endif /* __i386 */
2123 #endif /* __lint */
2124
2125
2126 #if defined(__lint)
2127
2128 /* ARGSUSED */
2129 void
2130 intr_restore(ulong_t i)	/* lint stub: restores a previously saved interrupt state */
2131 { return; }
2132
|