The POSIX daylight
and timezone
variables do not suffice and are no longer needed.
+ They are planned to be removed in a future edition of POSIX.
To get a timestamp's UT offset, consult
the tm_gmtoff
member if available; otherwise,
subtract values returned by localtime
@@ -1278,13 +1344,13 @@ Leap seconds
Leap seconds were introduced in 1972 to accommodate the
difference between atomic time and the less regular rotation of the earth.
-Unfortunately they caused so many problems with civil
-timekeeping that they
-are planned
-to be discontinued by 2035, with some as-yet-undetermined
-mechanism replacing them, perhaps after the year 2135.
-Despite their impending obsolescence, a record of leap seconds is still
-needed to resolve timestamps from 1972 through 2035.
+Unfortunately they have caused so many problems with civil
+timekeeping that there are
+plans
+to discontinue them by 2035.
+Even if these plans come to fruition, a record of leap seconds will still be
+needed to resolve timestamps from 1972 through 2035,
+and there may also be a need to record whatever mechanism replaces them.
@@ -1374,6 +1440,12 @@
Time and time zones off Earth
the establishment of a reference timescale for the Moon, which has
days roughly equivalent to 29.5 Earth days, and where relativistic
effects cause clocks to tick slightly faster than on Earth.
+Also, NASA
+has been ordered
+to consider the establishment of Coordinated Lunar Time (LTC).
+It is not yet known whether the US and European efforts will result in
+multiple timescales on the Moon.
diff --git a/contrib/tzdata/version b/contrib/tzdata/version
index 04fe6744432f..699e50d4d38e 100644
--- a/contrib/tzdata/version
+++ b/contrib/tzdata/version
@@ -1 +1 @@
-2024a
+2024b
diff --git a/contrib/tzdata/ziguard.awk b/contrib/tzdata/ziguard.awk
index 7a3404fa4fcc..c0acb72a0380 100644
--- a/contrib/tzdata/ziguard.awk
+++ b/contrib/tzdata/ziguard.awk
@@ -5,14 +5,10 @@
# This is not a general-purpose converter; it is designed for current tzdata.
# It just converts from current source to main, vanguard, and rearguard forms.
# Although it might be nice for it to be idempotent, or to be useful
-# for converting back and forth between vanguard and rearguard formats,
+# for converting back and forth between formats,
# it does not do these nonessential tasks now.
#
-# Although main and vanguard forms are currently equivalent,
-# this need not always be the case. When the two forms differ,
-# this script can convert either from main to vanguard form (needed then),
-# or from vanguard to main form (this conversion would be needed later,
-# after main became rearguard and vanguard became main).
+# This script can convert from main to vanguard form and vice versa.
# There is no need to convert rearguard to other forms.
#
# When converting to vanguard form, the output can use the line
@@ -145,12 +141,12 @@ DATAFORM != "main" {
}
# If this line should differ due to Portugal benefiting from %z if supported,
- # uncomment the desired version and comment out the undesired one.
- if ($0 ~ /^#?[\t ]+-[12]:00[\t ]+Port[\t ]+[%+-]/) {
- if (($0 ~ /%z/) == (DATAFORM == "vanguard")) {
- uncomment = in_comment
- } else {
+ # comment out the undesired version and uncomment the desired one.
+ if ($0 ~ /^#?[\t ]+-[12]:00[\t ]+((Port|W-Eur)[\t ]+[%+-]|-[\t ]+(%z|-01)[\t ]+1982 Mar 28)/) {
+ if (($0 ~ /%z/) == (DATAFORM == "rearguard")) {
comment_out = !in_comment
+ } else {
+ uncomment = in_comment
}
}
@@ -172,13 +168,8 @@ DATAFORM != "main" {
sub(/^/, "#")
}
- # Prefer %z in vanguard form, explicit abbreviations otherwise.
- if (DATAFORM == "vanguard") {
- sub(/^(Zone[\t ]+[^\t ]+)?[\t ]+[^\t ]+[\t ]+[^\t ]+[\t ]+[-+][^\t ]+/, \
- "&CHANGE-TO-%z")
- sub(/-00CHANGE-TO-%z/, "-00")
- sub(/[-+][^\t ]+CHANGE-TO-/, "")
- } else {
+ # Prefer explicit abbreviations in rearguard form, %z otherwise.
+ if (DATAFORM == "rearguard") {
if ($0 ~ /^[^#]*%z/) {
stdoff_column = 2 * ($0 ~ /^Zone/) + 1
rules_column = stdoff_column + 1
@@ -216,6 +207,11 @@ DATAFORM != "main" {
}
sub(/%z/, abbr)
}
+ } else {
+ sub(/^(Zone[\t ]+[^\t ]+)?[\t ]+[^\t ]+[\t ]+[^\t ]+[\t ]+[-+][^\t ]+/, \
+ "&CHANGE-TO-%z")
+ sub(/-00CHANGE-TO-%z/, "-00")
+ sub(/[-+][^\t ]+CHANGE-TO-/, "")
}
# Normally, prefer whole seconds. However, prefer subseconds
diff --git a/contrib/tzdata/zone.tab b/contrib/tzdata/zone.tab
index 3fa9306afbad..bfc0b5933044 100644
--- a/contrib/tzdata/zone.tab
+++ b/contrib/tzdata/zone.tab
@@ -264,8 +264,7 @@ MK +4159+02126 Europe/Skopje
ML +1239-00800 Africa/Bamako
MM +1647+09610 Asia/Yangon
MN +4755+10653 Asia/Ulaanbaatar most of Mongolia
-MN +4801+09139 Asia/Hovd Bayan-Olgiy, Govi-Altai, Hovd, Uvs, Zavkhan
-MN +4804+11430 Asia/Choibalsan Dornod, Sukhbaatar
+MN +4801+09139 Asia/Hovd Bayan-Olgii, Hovd, Uvs
MO +221150+1133230 Asia/Macau
MP +1512+14545 Pacific/Saipan
MQ +1436-06105 America/Martinique
diff --git a/contrib/tzdata/zone1970.tab b/contrib/tzdata/zone1970.tab
index a01f26d94a2d..764215888efa 100644
--- a/contrib/tzdata/zone1970.tab
+++ b/contrib/tzdata/zone1970.tab
@@ -210,8 +210,7 @@ MD +4700+02850 Europe/Chisinau
MH +0905+16720 Pacific/Kwajalein Kwajalein
MM,CC +1647+09610 Asia/Yangon
MN +4755+10653 Asia/Ulaanbaatar most of Mongolia
-MN +4801+09139 Asia/Hovd Bayan-Ölgii, Govi-Altai, Hovd, Uvs, Zavkhan
-MN +4804+11430 Asia/Choibalsan Dornod, Sükhbaatar
+MN +4801+09139 Asia/Hovd Bayan-Ölgii, Hovd, Uvs
MO +221150+1133230 Asia/Macau
MQ +1436-06105 America/Martinique
MT +3554+01431 Europe/Malta
diff --git a/contrib/tzdata/zonenow.tab b/contrib/tzdata/zonenow.tab
index b6f2910956fb..01f536b3ba38 100644
--- a/contrib/tzdata/zonenow.tab
+++ b/contrib/tzdata/zonenow.tab
@@ -5,7 +5,7 @@
# From Paul Eggert (2023-12-18):
# This file contains a table where each row stands for a timezone
# where civil timestamps are predicted to agree from now on.
-# This file is like zone1970.tab (see zone1970.tab's coments),
+# This file is like zone1970.tab (see zone1970.tab's comments),
# but with the following changes:
#
# 1. Each timezone corresponds to a set of clocks that are planned
@@ -123,8 +123,6 @@ XX +1455-02331 Atlantic/Cape_Verde Cape Verde
#
# -01/+00 (EU DST)
XX +3744-02540 Atlantic/Azores Azores
-# -01/+00 (EU DST) until 2024-03-31; then -02/-01 (EU DST)
-XX +7029-02158 America/Scoresbysund Ittoqqortoormiit
#
# +00 - GMT
XX +0519-00402 Africa/Abidjan far western Africa; Iceland ("GMT")
@@ -199,7 +197,7 @@ XX +2518+05518 Asia/Dubai Russia; Caucasus; Persian Gulf; Seychelles; Réunion
XX +3431+06912 Asia/Kabul Afghanistan
#
# +05
-XX +4120+06918 Asia/Tashkent Russia; west Kazakhstan; Tajikistan; Turkmenistan; Uzbekistan; Maldives
+XX +4120+06918 Asia/Tashkent Russia; Kazakhstan; Tajikistan; Turkmenistan; Uzbekistan; Maldives
#
# +05 - PKT
XX +2452+06703 Asia/Karachi Pakistan ("PKT")
@@ -215,8 +213,6 @@ XX +2743+08519 Asia/Kathmandu Nepal
#
# +06
XX +2343+09025 Asia/Dhaka Russia; Kyrgyzstan; Bhutan; Bangladesh; Chagos
-# +06 until 2024-03-01; then +05
-XX +4315+07657 Asia/Almaty Kazakhstan (except western areas)
#
# +06:30
XX +1647+09610 Asia/Yangon Myanmar; Cocos
diff --git a/etc/mtree/BSD.tests.dist b/etc/mtree/BSD.tests.dist
index ba03881bcc27..ac53de071c11 100644
--- a/etc/mtree/BSD.tests.dist
+++ b/etc/mtree/BSD.tests.dist
@@ -791,6 +791,10 @@
..
compat32
..
+ cam
+ ctl
+ ..
+ ..
devrandom
..
dtrace
diff --git a/etc/mtree/BSD.usr.dist b/etc/mtree/BSD.usr.dist
index 37d9b5854d0f..281302272716 100644
--- a/etc/mtree/BSD.usr.dist
+++ b/etc/mtree/BSD.usr.dist
@@ -74,6 +74,10 @@
engines-3
..
flua
+ freebsd
+ sys
+ ..
+ ..
..
i18n
..
diff --git a/lib/Makefile b/lib/Makefile
index f44064a2cc6d..24d7fddef7af 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -166,7 +166,7 @@ SUBDIR_DEPEND_nss_tacplus= libtacplus
.if !defined(COMPAT_LIBCOMPAT)
SUBDIR+= flua
-SUBDIR_DEPEND_flua= libjail
+SUBDIR_DEPEND_flua= libjail libucl
.endif
# NB: keep these sorted by MK_* knobs
diff --git a/lib/flua/Makefile b/lib/flua/Makefile
index 769736039f7e..d88e76f61062 100644
--- a/lib/flua/Makefile
+++ b/lib/flua/Makefile
@@ -1,4 +1,6 @@
+SUBDIR+= libfreebsd
SUBDIR+= libhash
SUBDIR+= libjail
+SUBDIR+= libucl
.include
diff --git a/lib/flua/libfreebsd/Makefile b/lib/flua/libfreebsd/Makefile
new file mode 100644
index 000000000000..6ed0451055ff
--- /dev/null
+++ b/lib/flua/libfreebsd/Makefile
@@ -0,0 +1,3 @@
+SUBDIR+= sys
+
+.include
diff --git a/lib/flua/libfreebsd/sys/Makefile b/lib/flua/libfreebsd/sys/Makefile
new file mode 100644
index 000000000000..9f38294536f2
--- /dev/null
+++ b/lib/flua/libfreebsd/sys/Makefile
@@ -0,0 +1,4 @@
+SUBDIR+= linker
+
+.include
+
diff --git a/lib/flua/libfreebsd/sys/linker/Makefile b/lib/flua/libfreebsd/sys/linker/Makefile
new file mode 100644
index 000000000000..572b5949d2a1
--- /dev/null
+++ b/lib/flua/libfreebsd/sys/linker/Makefile
@@ -0,0 +1,12 @@
+SHLIB_NAME= linker.so
+SHLIBDIR= ${LIBDIR}/flua/freebsd/sys
+
+SRCS+= linker.c
+
+CFLAGS+= \
+ -I${SRCTOP}/contrib/lua/src \
+ -I${SRCTOP}/lib/liblua \
+
+MAN= freebsd.sys.linker.3lua
+
+.include
diff --git a/lib/flua/libfreebsd/sys/linker/freebsd.sys.linker.3lua b/lib/flua/libfreebsd/sys/linker/freebsd.sys.linker.3lua
new file mode 100644
index 000000000000..0ab8f185388a
--- /dev/null
+++ b/lib/flua/libfreebsd/sys/linker/freebsd.sys.linker.3lua
@@ -0,0 +1,67 @@
+.\"
+.\" SPDX-License-Identifier: BSD-2-Clause
+.\"
+.\" Copyright (c) 2024, Baptiste Daroussin
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.Dd September 6, 2024
+.Dt FREEBSD.SYS.LINKER 3lua
+.Os
+.Sh NAME
+.Nm freebsd.sys.linker
+.Nd Lua binding to
+.Fx 's
+Linker functions
+.Sh SYNOPSIS
+.Bd -literal
+local linker = require('freebsd.sys.linker')
+.Ed
+.Pp
+.Bl -tag -width XXXX -compact
+.It Dv fileid, err, errno = linker.kldload(name)
+.It Dv ok, err, errno = linker.kldunload(fileid|name)
+.El
+.Sh DESCRIPTION
+The
+.Nm
+module is a binding to the
+.Fx 's
+linker functions.
+List of functions:
+.Bl -tag -width XXXX
+.It Dv fileid, err = freebsd.sys.linker.kldload(name)
+Load the kernel module named
+.Fa name
+and return the identifier
+.Pq fileid
+as an integer.
+.It Dv ok, err, errno = freebsd.sys.linker.kldunload(fileid|name)
+Unload the kernel module identified either by
+.Fa name
+as a string, or
+.Fa fileid
+as an integer.
+.El
+.Sh SEE ALSO
+.Xr kldload 2 ,
+.Xr kldunload 2
diff --git a/lib/flua/libfreebsd/sys/linker/linker.c b/lib/flua/libfreebsd/sys/linker/linker.c
new file mode 100644
index 000000000000..a2dc3b487525
--- /dev/null
+++ b/lib/flua/libfreebsd/sys/linker/linker.c
@@ -0,0 +1,104 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024, Baptiste Daroussin
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+int luaopen_freebsd_sys_linker(lua_State *L);
+
+static int
+lua_kldload(lua_State *L)
+{
+ const char *name;
+ int ret;
+
+ name = luaL_checkstring(L, 1);
+ ret = kldload(name);
+ if (ret == -1) {
+ lua_pushnil(L);
+ lua_pushstring(L, strerror(errno));
+ lua_pushinteger(L, errno);
+ return (3);
+ }
+ lua_pushinteger(L, ret);
+ return (1);
+}
+
+static int
+lua_kldunload(lua_State *L)
+{
+ const char *name;
+ int ret, fileid;
+
+ if (lua_isinteger(L, 1)) {
+ fileid = lua_tointeger(L, 1);
+ } else {
+ name = luaL_checkstring(L, 1);
+ fileid = kldfind(name);
+ }
+ if (fileid == -1) {
+ lua_pushnil(L);
+ lua_pushstring(L, strerror(errno));
+ lua_pushinteger(L, errno);
+ return (3);
+ }
+ ret = kldunload(fileid);
+ lua_pushinteger(L, ret);
+ if (ret == -1) {
+ lua_pushnil(L);
+ lua_pushstring(L, strerror(errno));
+ lua_pushinteger(L, errno);
+ return (3);
+ }
+ lua_pushinteger(L, 0);
+ return (1);
+}
+
+#define REG_SIMPLE(n) { #n, lua_ ## n }
+static const struct luaL_Reg freebsd_sys_linker[] = {
+ REG_SIMPLE(kldload),
+ REG_SIMPLE(kldunload),
+ { NULL, NULL },
+};
+#undef REG_SIMPLE
+
+int
+luaopen_freebsd_sys_linker(lua_State *L)
+{
+ luaL_newlib(L, freebsd_sys_linker);
+
+ return (1);
+}
diff --git a/lib/flua/libucl/Makefile b/lib/flua/libucl/Makefile
new file mode 100644
index 000000000000..7d2681b85fcb
--- /dev/null
+++ b/lib/flua/libucl/Makefile
@@ -0,0 +1,17 @@
+SHLIB_NAME= ucl.so
+SHLIBDIR= ${LIBDIR}/flua
+
+WARNS= 2
+
+UCLSRC?= ${SRCTOP}/contrib/libucl
+.PATH: ${UCLSRC}/lua
+SRCS+= lua_ucl.c
+CFLAGS+= \
+ -I${SRCTOP}/contrib/lua/src \
+ -I${SRCTOP}/lib/liblua \
+ -I${UCLSRC}/include \
+ -I${UCLSRC}/src \
+ -I${UCLSRC}/uthash
+LIBADD+= ucl
+
+.include
diff --git a/lib/geom/part/gpart.8 b/lib/geom/part/gpart.8
index fa0b247c6174..121e6d6a5119 100644
--- a/lib/geom/part/gpart.8
+++ b/lib/geom/part/gpart.8
@@ -22,7 +22,7 @@
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.Dd July 26, 2023
+.Dd September 2, 2024
.Dt GPART 8
.Os
.Sh NAME
@@ -1008,6 +1008,11 @@ A illumos/Solaris partition dedicated to reserved space.
The scheme-specific type is
.Qq Li "!6a945a3b-1dd2-11b2-99a6-080020736631"
for GPT.
+.It Cm u-boot-env
+A raw partition dedicated to U-Boot for storing its environment.
+The scheme-specific type is
+.Qq Li "!3de21764-95bd-54bd-a5c3-4abe786f38a8"
+for GPT.
.It Cm vmware-vmfs
A partition that contains a VMware File System (VMFS).
The scheme-specific types are
diff --git a/lib/libc/net/getaddrinfo.3 b/lib/libc/net/getaddrinfo.3
index 271ef8a0102b..634786a8bd12 100644
--- a/lib/libc/net/getaddrinfo.3
+++ b/lib/libc/net/getaddrinfo.3
@@ -480,7 +480,8 @@ freeaddrinfo(res0);
.Xr hosts 5 ,
.Xr resolv.conf 5 ,
.Xr services 5 ,
-.Xr hostname 7
+.Xr hostname 7 ,
+.Xr ip6addrctl 8
.Rs
.%A R. Gilligan
.%A S. Thomson
diff --git a/lib/libefivar/efivar-dp-format.c b/lib/libefivar/efivar-dp-format.c
index d97603c41dcb..72e024470a11 100644
--- a/lib/libefivar/efivar-dp-format.c
+++ b/lib/libefivar/efivar-dp-format.c
@@ -478,23 +478,41 @@ DevPathToTextAcpiEx (
)
{
ACPI_EXTENDED_HID_DEVICE_PATH *AcpiEx;
- CHAR8 *HIDStr;
- CHAR8 *UIDStr;
- CHAR8 *CIDStr;
char HIDText[11];
char CIDText[11];
-
- AcpiEx = DevPath;
- HIDStr = (CHAR8 *)(((UINT8 *)AcpiEx) + sizeof (ACPI_EXTENDED_HID_DEVICE_PATH));
- UIDStr = HIDStr + AsciiStrLen (HIDStr) + 1;
- CIDStr = UIDStr + AsciiStrLen (UIDStr) + 1;
+ UINTN CurrentLength;
+ CHAR8 *CurrentPos;
+ UINTN NextStringOffset;
+ CHAR8 *Strings[3];
+ UINT8 HidStrIndex;
+ UINT8 UidStrIndex;
+ UINT8 CidStrIndex;
+ UINT8 StrIndex;
+
+ HidStrIndex = 0;
+ UidStrIndex = 1;
+ CidStrIndex = 2;
+ AcpiEx = DevPath;
+ Strings[HidStrIndex] = NULL;
+ Strings[UidStrIndex] = NULL;
+ Strings[CidStrIndex] = NULL;
+ CurrentLength = sizeof (ACPI_EXTENDED_HID_DEVICE_PATH);
+ CurrentPos = (CHAR8 *)(((UINT8 *)AcpiEx) + sizeof (ACPI_EXTENDED_HID_DEVICE_PATH));
+ StrIndex = 0;
+ while (CurrentLength < AcpiEx->Header.Length[0] && StrIndex < ARRAY_SIZE (Strings)) {
+ Strings[StrIndex] = CurrentPos;
+ NextStringOffset = AsciiStrLen (CurrentPos) + 1;
+ CurrentLength += NextStringOffset;
+ CurrentPos += NextStringOffset;
+ StrIndex++;
+ }
if (DisplayOnly) {
if ((EISA_ID_TO_NUM (AcpiEx->HID) == 0x0A03) ||
((EISA_ID_TO_NUM (AcpiEx->CID) == 0x0A03) && (EISA_ID_TO_NUM (AcpiEx->HID) != 0x0A08)))
{
- if (AcpiEx->UID == 0) {
- UefiDevicePathLibCatPrint (Str, "PciRoot(%s)", UIDStr);
+ if (Strings[UidStrIndex] != NULL) {
+ UefiDevicePathLibCatPrint (Str, "PciRoot(%s)", Strings[UidStrIndex]);
} else {
UefiDevicePathLibCatPrint (Str, "PciRoot(0x%x)", AcpiEx->UID);
}
@@ -503,8 +521,8 @@ DevPathToTextAcpiEx (
}
if ((EISA_ID_TO_NUM (AcpiEx->HID) == 0x0A08) || (EISA_ID_TO_NUM (AcpiEx->CID) == 0x0A08)) {
- if (AcpiEx->UID == 0) {
- UefiDevicePathLibCatPrint (Str, "PcieRoot(%s)", UIDStr);
+ if (Strings[UidStrIndex] != NULL) {
+ UefiDevicePathLibCatPrint (Str, "PcieRoot(%s)", Strings[UidStrIndex]);
} else {
UefiDevicePathLibCatPrint (Str, "PcieRoot(0x%x)", AcpiEx->UID);
}
@@ -535,7 +553,10 @@ DevPathToTextAcpiEx (
(AcpiEx->CID >> 16) & 0xFFFF
);
- if ((*HIDStr == '\0') && (*CIDStr == '\0') && (*UIDStr != '\0')) {
+ if (((Strings[HidStrIndex] != NULL) && (*Strings[HidStrIndex] == '\0')) &&
+ ((Strings[CidStrIndex] != NULL) && (*Strings[CidStrIndex] == '\0')) &&
+ ((Strings[UidStrIndex] != NULL) && (*Strings[UidStrIndex] != '\0')))
+ {
//
// use AcpiExp()
//
@@ -544,7 +565,7 @@ DevPathToTextAcpiEx (
Str,
"AcpiExp(%s,0,%s)",
HIDText,
- UIDStr
+ Strings[UidStrIndex]
);
} else {
UefiDevicePathLibCatPrint (
@@ -552,28 +573,25 @@ DevPathToTextAcpiEx (
"AcpiExp(%s,%s,%s)",
HIDText,
CIDText,
- UIDStr
+ Strings[UidStrIndex]
);
}
} else {
if (DisplayOnly) {
- //
- // display only
- //
- if (AcpiEx->HID == 0) {
- UefiDevicePathLibCatPrint (Str, "AcpiEx(%s,", HIDStr);
+ if (Strings[HidStrIndex] != NULL) {
+ UefiDevicePathLibCatPrint (Str, "AcpiEx(%s,", Strings[HidStrIndex]);
} else {
UefiDevicePathLibCatPrint (Str, "AcpiEx(%s,", HIDText);
}
- if (AcpiEx->CID == 0) {
- UefiDevicePathLibCatPrint (Str, "%s,", CIDStr);
+ if (Strings[CidStrIndex] != NULL) {
+ UefiDevicePathLibCatPrint (Str, "%s,", Strings[CidStrIndex]);
} else {
UefiDevicePathLibCatPrint (Str, "%s,", CIDText);
}
- if (AcpiEx->UID == 0) {
- UefiDevicePathLibCatPrint (Str, "%s)", UIDStr);
+ if (Strings[UidStrIndex] != NULL) {
+ UefiDevicePathLibCatPrint (Str, "%s)", Strings[UidStrIndex]);
} else {
UefiDevicePathLibCatPrint (Str, "0x%x)", AcpiEx->UID);
}
@@ -584,9 +602,9 @@ DevPathToTextAcpiEx (
HIDText,
CIDText,
AcpiEx->UID,
- HIDStr,
- CIDStr,
- UIDStr
+ Strings[HidStrIndex] != NULL ? Strings[HidStrIndex] : '\0',
+ Strings[CidStrIndex] != NULL ? Strings[CidStrIndex] : '\0',
+ Strings[UidStrIndex] != NULL ? Strings[UidStrIndex] : '\0'
);
}
}
diff --git a/lib/libnv/tests/Makefile b/lib/libnv/tests/Makefile
index 2e6563a83077..aea416539c4a 100644
--- a/lib/libnv/tests/Makefile
+++ b/lib/libnv/tests/Makefile
@@ -1,6 +1,16 @@
+.include
+
ATF_TESTS_C= \
nvlist_send_recv_test
+.PATH: ${SRCTOP}/lib/libnv
+SRCS.nvlist_send_recv_test= msgio.c nvlist_send_recv_test.c
+CFLAGS.nvlist_send_recv_test+=-I${SRCTOP}/sys/contrib/libnv
+CFLAGS.nvlist_send_recv_test+=-I${SRCTOP}/lib/libnv
+.if ${MK_ASAN} != "yes"
+CFLAGS.nvlist_send_recv_test+=-DNO_ASAN
+.endif
+
ATF_TESTS_CXX= \
cnv_tests \
dnv_tests \
diff --git a/lib/libnv/tests/nv_array_tests.cc b/lib/libnv/tests/nv_array_tests.cc
index 06d7525c3e1d..9acbaef67eba 100644
--- a/lib/libnv/tests/nv_array_tests.cc
+++ b/lib/libnv/tests/nv_array_tests.cc
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2015 Mariusz Zaborski
- * All rights reserved.
+ * Copyright (c) 2015-2024 Mariusz Zaborski
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,6 +26,7 @@
#include
#include
#include
+#include
#include
#include
@@ -1161,6 +1161,58 @@ ATF_TEST_CASE_BODY(nvlist_nvlist_array__pack)
free(packed);
}
+
+ATF_TEST_CASE_WITHOUT_HEAD(nvlist_string_array_nonull__pack);
+ATF_TEST_CASE_BODY(nvlist_string_array_nonull__pack)
+{
+ nvlist_t *testnvl, *unpacked;
+ const char *somestr[3] = { "a", "b", "XXX" };
+ uint8_t *packed, *twopages, *dataptr, *secondpage;
+ size_t packed_size, page_size;
+ bool found;
+
+ page_size = sysconf(_SC_PAGESIZE);
+ testnvl = nvlist_create(0);
+ ATF_REQUIRE(testnvl != NULL);
+ ATF_REQUIRE_EQ(nvlist_error(testnvl), 0);
+ nvlist_add_string_array(testnvl, "nvl/string", somestr,
+ nitems(somestr));
+ ATF_REQUIRE_EQ(nvlist_error(testnvl), 0);
+
+ packed = (uint8_t *)nvlist_pack(testnvl, &packed_size);
+ ATF_REQUIRE(packed != NULL);
+
+ twopages = (uint8_t *)mmap(NULL, page_size * 2, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ ATF_REQUIRE(twopages != MAP_FAILED);
+ dataptr = &twopages[page_size - packed_size];
+ secondpage = &twopages[page_size];
+
+ memset(twopages, 'A', page_size * 2);
+
+ mprotect(secondpage, page_size, PROT_NONE);
+ memcpy(dataptr, packed, packed_size);
+
+ found = false;
+ for (size_t i = 0; i < packed_size - 3; i++) {
+ if (dataptr[i] == 'X' && dataptr[i + 1] == 'X' &&
+ dataptr[i + 2] == 'X' && dataptr[i + 3] == '\0') {
+ dataptr[i + 3] = 'X';
+ found = true;
+ break;
+ }
+ }
+ ATF_REQUIRE(found == true);
+
+ unpacked = nvlist_unpack(dataptr, packed_size, 0);
+ ATF_REQUIRE(unpacked == NULL);
+
+ nvlist_destroy(testnvl);
+ free(packed);
+ munmap(twopages, page_size * 2);
+}
+
+
ATF_INIT_TEST_CASES(tp)
{
@@ -1190,5 +1242,7 @@ ATF_INIT_TEST_CASES(tp)
ATF_ADD_TEST_CASE(tp, nvlist_descriptor_array__pack)
ATF_ADD_TEST_CASE(tp, nvlist_string_array__pack)
ATF_ADD_TEST_CASE(tp, nvlist_nvlist_array__pack)
+
+ ATF_ADD_TEST_CASE(tp, nvlist_string_array_nonull__pack)
}
diff --git a/lib/libnv/tests/nvlist_send_recv_test.c b/lib/libnv/tests/nvlist_send_recv_test.c
index f060ee2684d5..79297dfe2043 100644
--- a/lib/libnv/tests/nvlist_send_recv_test.c
+++ b/lib/libnv/tests/nvlist_send_recv_test.c
@@ -43,6 +43,9 @@
#include
+#include
+#include
+
#define ALPHABET "abcdefghijklmnopqrstuvwxyz"
#define fd_is_valid(fd) (fcntl((fd), F_GETFL) != -1 || errno != EBADF)
@@ -542,6 +545,192 @@ ATF_TC_BODY(nvlist_send_recv__send_closed_fd__stream, tc)
nvlist_send_recv__send_closed_fd(SOCK_STREAM);
}
+ATF_TC_WITHOUT_HEAD(nvlist_send_recv__overflow_header_size);
+ATF_TC_BODY(nvlist_send_recv__overflow_header_size, tc)
+{
+ nvlist_t *nvl;
+ void *packed;
+ size_t packed_size;
+ struct nvlist_header *header;
+ int fd, socks[2], status;
+ pid_t pid;
+
+#ifdef NO_ASAN
+ atf_tc_skip("This test requires ASAN");
+#endif
+
+ ATF_REQUIRE_EQ(socketpair(PF_UNIX, SOCK_STREAM, 0, socks), 0);
+
+ pid = fork();
+ ATF_REQUIRE(pid >= 0);
+
+ if (pid == 0) {
+ /* Child. */
+ fd = socks[0];
+ close(socks[1]);
+
+ nvl = nvlist_create(0);
+ ATF_REQUIRE(nvl != NULL);
+ ATF_REQUIRE(nvlist_empty(nvl));
+
+ packed = nvlist_pack(nvl, &packed_size);
+ ATF_REQUIRE(packed != NULL);
+ ATF_REQUIRE(packed_size >= sizeof(struct nvlist_header));
+
+ header = (struct nvlist_header *)packed;
+ header->nvlh_size = SIZE_MAX - sizeof(struct nvlist_header) + 2;
+
+ ATF_REQUIRE_EQ(write(fd, packed, packed_size),
+ (ssize_t)sizeof(struct nvlist_header));
+
+ nvlist_destroy(nvl);
+ free(packed);
+
+ exit(0);
+ } else {
+ /* Parent */
+ fd = socks[1];
+ close(socks[0]);
+
+ errno = 0;
+ nvl = nvlist_recv(fd, 0);
+ ATF_REQUIRE(nvl == NULL);
+
+ /*
+ * Make sure it has failed on EINVAL, and not on
+ * errors returned by malloc or recv.
+ */
+ ATF_REQUIRE(errno == EINVAL);
+
+ ATF_REQUIRE(waitpid(pid, &status, 0) == pid);
+ ATF_REQUIRE(status == 0);
+ close(fd);
+ }
+}
+
+ATF_TC_WITHOUT_HEAD(nvlist_send_recv__invalid_fd_size);
+ATF_TC_BODY(nvlist_send_recv__invalid_fd_size, tc)
+{
+ nvlist_t *nvl;
+ void *packed;
+ size_t packed_size;
+ struct nvlist_header *header;
+ int fd, socks[2], status;
+ pid_t pid;
+
+ ATF_REQUIRE_EQ(socketpair(PF_UNIX, SOCK_STREAM, 0, socks), 0);
+
+ pid = fork();
+ ATF_REQUIRE(pid >= 0);
+
+ if (pid == 0) {
+ /* Child. */
+ fd = socks[0];
+ close(socks[1]);
+
+ nvl = nvlist_create(0);
+ ATF_REQUIRE(nvl != NULL);
+ ATF_REQUIRE(nvlist_empty(nvl));
+
+ nvlist_add_string(nvl, "nvl/string", "test");
+ ATF_REQUIRE_EQ(nvlist_error(nvl), 0);
+
+ packed = nvlist_pack(nvl, &packed_size);
+ ATF_REQUIRE(packed != NULL);
+ ATF_REQUIRE(packed_size >= sizeof(struct nvlist_header));
+
+ header = (struct nvlist_header *)packed;
+ header->nvlh_descriptors = 0x20;
+
+ ATF_REQUIRE_EQ(write(fd, packed, packed_size),
+ (ssize_t)packed_size);
+
+ nvlist_destroy(nvl);
+ free(packed);
+
+ exit(0);
+ } else {
+ /* Parent */
+ fd = socks[1];
+ close(socks[0]);
+
+ nvl = nvlist_recv(fd, 0);
+ ATF_REQUIRE(nvl == NULL);
+
+ ATF_REQUIRE(waitpid(pid, &status, 0) == pid);
+ ATF_REQUIRE(status == 0);
+ }
+
+ close(fd);
+}
+
+ATF_TC_WITHOUT_HEAD(nvlist_send_recv__overflow_fd_size);
+ATF_TC_BODY(nvlist_send_recv__overflow_fd_size, tc)
+{
+ nvlist_t *nvl;
+ void *packed;
+ size_t packed_size;
+ struct nvlist_header *header;
+ int fd, socks[2], fds[1], status;
+ pid_t pid;
+
+ ATF_REQUIRE_EQ(socketpair(PF_UNIX, SOCK_STREAM, 0, socks), 0);
+
+ pid = fork();
+ ATF_REQUIRE(pid >= 0);
+
+ if (pid == 0) {
+ /* Child. */
+ fd = socks[0];
+ close(socks[1]);
+
+ nvl = nvlist_create(0);
+ ATF_REQUIRE(nvl != NULL);
+ ATF_REQUIRE(nvlist_empty(nvl));
+
+ nvlist_add_string(nvl, "nvl/string", "test");
+ ATF_REQUIRE_EQ(nvlist_error(nvl), 0);
+
+ packed = nvlist_pack(nvl, &packed_size);
+ ATF_REQUIRE(packed != NULL);
+ ATF_REQUIRE(packed_size >= sizeof(struct nvlist_header));
+
+ header = (struct nvlist_header *)packed;
+ header->nvlh_descriptors = 0x4000000000000002;
+
+ ATF_REQUIRE_EQ(write(fd, packed, packed_size),
+ (ssize_t)packed_size);
+
+ fds[0] = dup(STDERR_FILENO);
+ ATF_REQUIRE(fds[0] >= 0);
+ ATF_REQUIRE_EQ(fd_send(fd, fds, 1), 0);
+
+ nvlist_destroy(nvl);
+ free(packed);
+
+ close(fds[0]);
+ close(fd);
+
+ exit(0);
+ } else {
+ /* Parent */
+ fd = socks[1];
+ close(socks[0]);
+
+ nvl = nvlist_recv(fd, 0);
+ ATF_REQUIRE(nvl == NULL);
+
+ /* Make sure that fd was not parsed by nvlist */
+ ATF_REQUIRE(fd_recv(fd, fds, 1) == 0);
+
+ ATF_REQUIRE(waitpid(pid, &status, 0) == pid);
+ ATF_REQUIRE(status == 0);
+
+ close(fds[0]);
+ close(fd);
+ }
+}
+
ATF_TP_ADD_TCS(tp)
{
@@ -552,5 +741,9 @@ ATF_TP_ADD_TCS(tp)
ATF_TP_ADD_TC(tp, nvlist_send_recv__send_many_fds__dgram);
ATF_TP_ADD_TC(tp, nvlist_send_recv__send_many_fds__stream);
+ ATF_TP_ADD_TC(tp, nvlist_send_recv__overflow_header_size);
+ ATF_TP_ADD_TC(tp, nvlist_send_recv__invalid_fd_size);
+ ATF_TP_ADD_TC(tp, nvlist_send_recv__overflow_fd_size);
+
return (atf_no_error());
}
diff --git a/libexec/flua/Makefile b/libexec/flua/Makefile
index 8d36fbd95c4d..8b77d90114d1 100644
--- a/libexec/flua/Makefile
+++ b/libexec/flua/Makefile
@@ -4,7 +4,7 @@ LUASRC?= ${SRCTOP}/contrib/lua/src
.PATH: ${LUASRC}
PROG= flua
-WARNS?= 2
+WARNS?= 3
MAN= # No manpage; this is internal.
CWARNFLAGS.gcc+= -Wno-format-nonliteral
@@ -35,10 +35,4 @@ LDFLAGS+= -Wl,-E
LD_FATAL_WARNINGS=no
.endif
-UCLSRC?= ${SRCTOP}/contrib/libucl
-.PATH: ${UCLSRC}/lua
-SRCS+= lua_ucl.c
-CFLAGS+= -I${UCLSRC}/include -I${UCLSRC}/src -I${UCLSRC}/uthash
-LIBADD+= ucl
-
.include
diff --git a/libexec/flua/linit_flua.c b/libexec/flua/linit_flua.c
index 4d4d69920e94..4635970d1fd7 100644
--- a/libexec/flua/linit_flua.c
+++ b/libexec/flua/linit_flua.c
@@ -36,7 +36,6 @@
#include "lfs.h"
#include "lposix.h"
#include "lfbsd.h"
-#include "lua_ucl.h"
/*
** these libs are loaded by lua.c and are readily available to any Lua
@@ -59,8 +58,8 @@ static const luaL_Reg loadedlibs[] = {
/* FreeBSD Extensions */
{"lfs", luaopen_lfs},
{"posix.sys.stat", luaopen_posix_sys_stat},
+ {"posix.sys.utsname", luaopen_posix_sys_utsname},
{"posix.unistd", luaopen_posix_unistd},
- {"ucl", luaopen_ucl},
{"fbsd", luaopen_fbsd},
{NULL, NULL}
};
diff --git a/libexec/flua/modules/lposix.c b/libexec/flua/modules/lposix.c
index 5b6e80a0309f..fa3fd5f8e589 100644
--- a/libexec/flua/modules/lposix.c
+++ b/libexec/flua/modules/lposix.c
@@ -24,8 +24,8 @@
*
*/
-#include
#include
+#include
#include
#include
@@ -130,12 +130,50 @@ lua_getpid(lua_State *L)
return 1;
}
+static int
+lua_uname(lua_State *L)
+{
+ struct utsname name;
+ int error, n;
+
+ n = lua_gettop(L);
+ luaL_argcheck(L, n == 0, 1, "too many arguments");
+
+ error = uname(&name);
+ if (error != 0) {
+ error = errno;
+ lua_pushnil(L);
+ lua_pushstring(L, strerror(error));
+ lua_pushinteger(L, error);
+ return (3);
+ }
+
+ lua_newtable(L);
+#define setkv(f) do { \
+ lua_pushstring(L, name.f); \
+ lua_setfield(L, -2, #f); \
+} while (0)
+ setkv(sysname);
+ setkv(nodename);
+ setkv(release);
+ setkv(version);
+ setkv(machine);
+#undef setkv
+
+ return (1);
+}
+
#define REG_SIMPLE(n) { #n, lua_ ## n }
static const struct luaL_Reg sys_statlib[] = {
REG_SIMPLE(chmod),
{ NULL, NULL },
};
+static const struct luaL_Reg sys_utsnamelib[] = {
+ REG_SIMPLE(uname),
+ { NULL, NULL },
+};
+
static const struct luaL_Reg unistdlib[] = {
REG_SIMPLE(getpid),
REG_SIMPLE(chown),
@@ -150,6 +188,13 @@ luaopen_posix_sys_stat(lua_State *L)
return 1;
}
+int
+luaopen_posix_sys_utsname(lua_State *L)
+{
+ luaL_newlib(L, sys_utsnamelib);
+ return 1;
+}
+
int
luaopen_posix_unistd(lua_State *L)
{
diff --git a/libexec/flua/modules/lposix.h b/libexec/flua/modules/lposix.h
index 6085bf045d79..e37caaae9d04 100644
--- a/libexec/flua/modules/lposix.h
+++ b/libexec/flua/modules/lposix.h
@@ -8,4 +8,5 @@
#include
int luaopen_posix_sys_stat(lua_State *L);
+int luaopen_posix_sys_utsname(lua_State *L);
int luaopen_posix_unistd(lua_State *L);
diff --git a/libexec/rc/network.subr b/libexec/rc/network.subr
index 257643f48ba5..931fbec19a60 100644
--- a/libexec/rc/network.subr
+++ b/libexec/rc/network.subr
@@ -46,8 +46,8 @@ ifn_start()
ifscript_up ${ifn} && cfg=0
ifconfig_up ${ifn} && cfg=0
if ! noafif $ifn; then
- afexists inet && ipv4_up ${ifn} && cfg=0
afexists inet6 && ipv6_up ${ifn} && cfg=0
+ afexists inet && ipv4_up ${ifn} && cfg=0
fi
childif_create ${ifn} && cfg=0
@@ -67,8 +67,8 @@ ifn_stop()
[ -z "$ifn" ] && err 1 "ifn_stop called without an interface"
if ! noafif $ifn; then
- afexists inet6 && ipv6_down ${ifn} && cfg=0
afexists inet && ipv4_down ${ifn} && cfg=0
+ afexists inet6 && ipv6_down ${ifn} && cfg=0
fi
ifconfig_down ${ifn} && cfg=0
ifscript_down ${ifn} && cfg=0
diff --git a/release/Makefile.vm b/release/Makefile.vm
index d8914d305e2f..b4fe6e23ffcd 100644
--- a/release/Makefile.vm
+++ b/release/Makefile.vm
@@ -39,9 +39,10 @@ BASIC-CLOUDINIT_FSLIST?= ufs zfs
BASIC-CLOUDINIT_DESC?= Images for VM with cloudinit disk config support
EC2_FORMAT= raw
EC2_FSLIST?= ufs zfs
-EC2_FLAVOURS?= BASE CLOUD-INIT
+EC2_FLAVOURS?= BASE CLOUD-INIT SMALL
EC2-BASE_DESC= Amazon EC2 image
EC2-CLOUD-INIT_DESC= Amazon EC2 Cloud-Init image
+EC2-SMALL_DESC= Amazon EC2 small image
GCE_FORMAT= raw
GCE_FSLIST?= ufs zfs
GCE_DESC= Google Compute Engine image
diff --git a/release/tools/ec2-base.conf b/release/tools/ec2-base.conf
index d80035e11ed7..3ed20474a4e8 100644
--- a/release/tools/ec2-base.conf
+++ b/release/tools/ec2-base.conf
@@ -4,11 +4,13 @@
# Packages to install into the image we're creating. In addition to packages
# present on all EC2 AMIs, we install:
+# * amazon-ssm-agent (not enabled by default, but some users need to use
+# it on systems not connected to the internet),
# * ec2-scripts, which provides a range of EC2ification startup scripts,
# * firstboot-freebsd-update, to install security updates at first boot,
# * firstboot-pkgs, to install packages at first boot, and
# * isc-dhcp44-client, used for IPv6 network setup.
-export VM_EXTRA_PACKAGES="${VM_EXTRA_PACKAGES} ec2-scripts \
+export VM_EXTRA_PACKAGES="${VM_EXTRA_PACKAGES} amazon-ssm-agent ec2-scripts \
firstboot-freebsd-update firstboot-pkgs isc-dhcp44-client"
# Services to enable in rc.conf(5).
@@ -22,29 +24,6 @@ vm_extra_pre_umount() {
# via EC2 user-data.
echo 'firstboot_pkgs_list="devel/py-awscli"' >> ${DESTDIR}/etc/rc.conf
- # EC2 instances use DHCP to get their network configuration. IPv6
- # requires accept_rtadv.
- echo 'ifconfig_DEFAULT="SYNCDHCP accept_rtadv"' >> ${DESTDIR}/etc/rc.conf
-
- # The EC2 DHCP server can be trusted to know whether an IP address is
- # assigned to us; we don't need to ARP to check if anyone else is using
- # the address before we start using it.
- echo 'dhclient_arpwait="NO"' >> ${DESTDIR}/etc/rc.conf
-
- # Enable IPv6 on all interfaces, and spawn DHCPv6 via rtsold
- echo 'ipv6_activate_all_interfaces="YES"' >> ${DESTDIR}/etc/rc.conf
- echo 'rtsold_enable="YES"' >> ${DESTDIR}/etc/rc.conf
- echo 'rtsold_flags="-M /usr/local/libexec/rtsold-M -a"' >> ${DESTDIR}/etc/rc.conf
-
- # Provide a script which rtsold can use to launch DHCPv6
- mkdir -p ${DESTDIR}/usr/local/libexec
- cat > ${DESTDIR}/usr/local/libexec/rtsold-M <<'EOF'
-#!/bin/sh
-
-/usr/local/sbin/dhclient -6 -nw -N -cf /dev/null $1
-EOF
- chmod 755 ${DESTDIR}/usr/local/libexec/rtsold-M
-
# Any EC2 ephemeral disks seen when the system first boots will
# be "new" disks; there is no "previous boot" when they might have
# been seen and used already.
@@ -53,5 +32,8 @@ EOF
# Configuration common to all EC2 AMIs
ec2_common
+ # Standard FreeBSD network configuration
+ ec2_base_networking
+
return 0
}
diff --git a/release/tools/ec2-cloud-init.conf b/release/tools/ec2-cloud-init.conf
index 7682d635b1d6..048202e252f6 100644
--- a/release/tools/ec2-cloud-init.conf
+++ b/release/tools/ec2-cloud-init.conf
@@ -3,8 +3,8 @@
. ${WORLDDIR}/release/tools/ec2.conf
# Packages to install into the image we're creating. In addition to packages
-# present on all EC2 AMIs, we install cloud-init.
-export VM_EXTRA_PACKAGES="${VM_EXTRA_PACKAGES} net/cloud-init"
+# present on all EC2 AMIs, we install amazon-ssm-agent and cloud-init.
+export VM_EXTRA_PACKAGES="${VM_EXTRA_PACKAGES} amazon-ssm-agent net/cloud-init"
# Services to enable in rc.conf(5).
export VM_RC_LIST="${VM_RC_LIST} cloudinit sshd"
diff --git a/release/tools/ec2-small.conf b/release/tools/ec2-small.conf
new file mode 100644
index 000000000000..858836717f5a
--- /dev/null
+++ b/release/tools/ec2-small.conf
@@ -0,0 +1,44 @@
+#!/bin/sh
+
+. ${WORLDDIR}/release/tools/ec2.conf
+
+# Build with a 4.9 GB partition; the growfs rc.d script will expand
+# the partition to fill the root disk after the EC2 instance is launched.
+# Note that if this is set to <N>G, we will end up with an <N+1> GB disk
+# image since VMSIZE is the size of the filesystem partition, not the disk
+# which it resides within. (This overrides the default in ec2.conf.)
+export VMSIZE=5000m
+
+# Flags to installworld/kernel: We don't want debug symbols (kernel or
+# userland), 32-bit libraries, tests, or the debugger.
+export INSTALLOPTS="WITHOUT_DEBUG_FILES=YES WITHOUT_KERNEL_SYMBOLS=YES \
+ WITHOUT_LIB32=YES WITHOUT_TESTS=YES WITHOUT_LLDB=YES"
+
+# Packages to install into the image we're creating. In addition to packages
+# present on all EC2 AMIs, we install:
+# * ec2-scripts, which provides a range of EC2ification startup scripts,
+# * firstboot-freebsd-update, to install security updates at first boot,
+# * firstboot-pkgs, to install packages at first boot, and
+# * isc-dhcp44-client, used for IPv6 network setup.
+export VM_EXTRA_PACKAGES="${VM_EXTRA_PACKAGES} ec2-scripts \
+ firstboot-freebsd-update firstboot-pkgs isc-dhcp44-client"
+
+# Services to enable in rc.conf(5).
+export VM_RC_LIST="${VM_RC_LIST} ec2_configinit ec2_ephemeral_swap \
+ ec2_fetchkey ec2_loghostkey firstboot_freebsd_update firstboot_pkgs \
+ growfs sshd"
+
+vm_extra_pre_umount() {
+ # Any EC2 ephemeral disks seen when the system first boots will
+ # be "new" disks; there is no "previous boot" when they might have
+ # been seen and used already.
+ touch ${DESTDIR}/var/db/ec2_ephemeral_diskseen
+
+ # Configuration common to all EC2 AMIs
+ ec2_common
+
+ # Standard FreeBSD network configuration
+ ec2_base_networking
+
+ return 0
+}
diff --git a/release/tools/ec2.conf b/release/tools/ec2.conf
index 602216d3c2d4..2cca5fa713af 100644
--- a/release/tools/ec2.conf
+++ b/release/tools/ec2.conf
@@ -1,11 +1,9 @@
#!/bin/sh
-# Packages which should be installed onto all EC2 AMIs:
+# Package which should be installed onto all EC2 AMIs:
# * ebsnvme-id, which is very minimal and provides important EBS-specific
# functionality,
-# * amazon-ssm-agent (not enabled by default, but some users need to use
-# it on systems not connected to the internet).
-export VM_EXTRA_PACKAGES="${VM_EXTRA_PACKAGES} ebsnvme-id amazon-ssm-agent"
+export VM_EXTRA_PACKAGES="${VM_EXTRA_PACKAGES} ebsnvme-id"
# Services which should be enabled by default in rc.conf(5).
export VM_RC_LIST="dev_aws_disk ntpd"
@@ -104,3 +102,30 @@ EOF
return 0
}
+
+ec2_base_networking () {
+ # EC2 instances use DHCP to get their network configuration. IPv6
+ # requires accept_rtadv.
+ echo 'ifconfig_DEFAULT="SYNCDHCP accept_rtadv"' >> ${DESTDIR}/etc/rc.conf
+
+ # The EC2 DHCP server can be trusted to know whether an IP address is
+ # assigned to us; we don't need to ARP to check if anyone else is using
+ # the address before we start using it.
+ echo 'dhclient_arpwait="NO"' >> ${DESTDIR}/etc/rc.conf
+
+ # Enable IPv6 on all interfaces, and spawn DHCPv6 via rtsold
+ echo 'ipv6_activate_all_interfaces="YES"' >> ${DESTDIR}/etc/rc.conf
+ echo 'rtsold_enable="YES"' >> ${DESTDIR}/etc/rc.conf
+ echo 'rtsold_flags="-M /usr/local/libexec/rtsold-M -a"' >> ${DESTDIR}/etc/rc.conf
+
+ # Provide a script which rtsold can use to launch DHCPv6
+ mkdir -p ${DESTDIR}/usr/local/libexec
+ cat > ${DESTDIR}/usr/local/libexec/rtsold-M <<'EOF'
+#!/bin/sh
+
+/usr/local/sbin/dhclient -6 -nw -N -cf /dev/null $1
+EOF
+ chmod 755 ${DESTDIR}/usr/local/libexec/rtsold-M
+
+ return 0
+}
diff --git a/release/tools/vmimage.subr b/release/tools/vmimage.subr
index 9a60be57acf3..5d98b8990705 100644
--- a/release/tools/vmimage.subr
+++ b/release/tools/vmimage.subr
@@ -52,7 +52,7 @@ vm_install_base() {
# Installs the FreeBSD userland/kernel to the virtual machine disk.
cd ${WORLDDIR} && \
- make DESTDIR=${DESTDIR} \
+ make DESTDIR=${DESTDIR} ${INSTALLOPTS} \
installworld installkernel distribution || \
err "\n\nCannot install the base system to ${DESTDIR}."
diff --git a/sbin/geom/core/geom.8 b/sbin/geom/core/geom.8
index 124ea0f2be11..7f0f0b2911b3 100644
--- a/sbin/geom/core/geom.8
+++ b/sbin/geom/core/geom.8
@@ -1,3 +1,6 @@
+.\"-
+.\" SPDX-License-Identifier: BSD-2-Clause
+.\"
.\" Copyright (c) 2004-2005 Pawel Jakub Dawidek
.\" All rights reserved.
.\"
@@ -27,7 +30,7 @@
.Os
.Sh NAME
.Nm geom
-.Nd "universal control utility for GEOM classes"
+.Nd universal control utility for GEOM classes
.Sh SYNOPSIS
.Nm
.Ar class
@@ -66,7 +69,7 @@ which can be used for existing
.Nm
unaware classes.
Here is the list of standard commands:
-.Bl -tag -width ".Cm status"
+.Bl -tag -width indent
.It Cm help
List all available commands for the given class.
.It Cm list
@@ -74,7 +77,7 @@ Print detailed information (within the given class) about all geoms
(if no additional arguments were specified) or the given geoms.
This command is only available if the given class exists in the kernel.
Additional options include:
-.Bl -tag -width ".Fl a"
+.Bl -tag -width "-a"
.It Fl a
Print information for geoms without providers.
.El
@@ -84,9 +87,11 @@ Print general information (within the given class) about all geoms
This command is only available if the given class exists in the kernel.
.Pp
Additional options include:
-.Bl -tag -width ".Fl s"
+.Bl -tag -width "-s"
.It Fl a
-When used with -g, print status for geoms without providers.
+When used with
+.Fl g ,
+print status for geoms without providers.
.It Fl g
Report statuses for geoms instead of providers.
.It Fl s
@@ -107,7 +112,7 @@ kernel module.
.El
.Pp
Additional options include:
-.Bl -tag -width ".Cm status"
+.Bl -tag -width indent
.It Fl p Ar provider-name
Print detailed information about the geom which provides
.Ar provider-name .
@@ -170,7 +175,7 @@ VIRSTOR
.Sh ENVIRONMENT
The following environment variables affect the execution of
.Nm :
-.Bl -tag -width ".Ev GEOM_LIBRARY_PATH"
+.Bl -tag -width "GEOM_LIBRARY_PATH"
.It Ev GEOM_LIBRARY_PATH
Specifies the path where shared libraries are stored instead of
.Pa /lib/geom/ .
@@ -213,7 +218,6 @@ geom md unload
.Xr gnop 8 ,
.Xr gpart 8 ,
.Xr graid3 8 ,
-.Xr gsched 8 ,
.Xr gshsec 8 ,
.Xr gstripe 8 ,
.Xr gunion 8 ,
diff --git a/sbin/growfs/growfs.8 b/sbin/growfs/growfs.8
index 9a6076017c74..9b619613f30e 100644
--- a/sbin/growfs/growfs.8
+++ b/sbin/growfs/growfs.8
@@ -61,16 +61,11 @@ The
.Nm
utility extends the size of the file system on the specified special file.
The following options are available:
-.Bl -tag -width indent
+.Bl -tag -width "-s size"
.It Fl N
.Dq Test mode .
Causes the new file system parameters to be printed out without actually
enlarging the file system.
-.It Fl y
-Causes
-.Nm
-to assume yes
-as the answer to all operator questions.
.It Fl s Ar size
Determines the
.Ar size
@@ -87,6 +82,11 @@ This value defaults to the size of the raw partition specified in
(in other words,
.Nm
will enlarge the file system to the size of the entire partition).
+.It Fl y
+Causes
+.Nm
+to assume yes
+as the answer to all operator questions.
.El
.Sh EXIT STATUS
Exit status is 0 on success, and >= 1 on errors.
diff --git a/sbin/ifconfig/ifconfig.8 b/sbin/ifconfig/ifconfig.8
index c9861ccc6481..dfea59dfd229 100644
--- a/sbin/ifconfig/ifconfig.8
+++ b/sbin/ifconfig/ifconfig.8
@@ -502,6 +502,10 @@ Enable driver dependent debugging code; usually, this turns on
extra console error logging.
.It Fl debug
Disable driver dependent debugging code.
+.It Cm allmulti
+Enable promiscuous mode for multicast packets.
+.It Fl allmulti
+Disable promiscuous mode for multicast packets.
.It Cm promisc
Put interface into permanently promiscuous mode.
.It Fl promisc
diff --git a/sbin/ifconfig/ifconfig.c b/sbin/ifconfig/ifconfig.c
index e6ed9015b34b..a0680d09e54c 100644
--- a/sbin/ifconfig/ifconfig.c
+++ b/sbin/ifconfig/ifconfig.c
@@ -2078,6 +2078,8 @@ static struct cmd basic_cmds[] = {
DEF_CMD_ARG("descr", setifdescr),
DEF_CMD("-description", 0, unsetifdescr),
DEF_CMD("-descr", 0, unsetifdescr),
+ DEF_CMD("allmulti", IFF_PALLMULTI, setifflags),
+ DEF_CMD("-allmulti", IFF_PALLMULTI, clearifflags),
DEF_CMD("promisc", IFF_PPROMISC, setifflags),
DEF_CMD("-promisc", IFF_PPROMISC, clearifflags),
DEF_CMD("add", IFF_UP, notealias),
diff --git a/sbin/ifconfig/ifconfig_netlink.c b/sbin/ifconfig/ifconfig_netlink.c
index 729d4ca56545..5a986e840d7f 100644
--- a/sbin/ifconfig/ifconfig_netlink.c
+++ b/sbin/ifconfig/ifconfig_netlink.c
@@ -77,7 +77,7 @@ static const char *IFFBITS[] = {
"STICKYARP", /* 20:0x100000 IFF_STICKYARP*/
"DYING", /* 21:0x200000 IFF_DYING*/
"RENAMING", /* 22:0x400000 IFF_RENAMING*/
- "NOGROUP", /* 23:0x800000 IFF_NOGROUP*/
+ "PALLMULTI", /* 23:0x800000 IFF_PALLMULTI*/
"LOWER_UP", /* 24:0x1000000 IFF_NETLINK_1*/
};
diff --git a/sbin/pfctl/parse.y b/sbin/pfctl/parse.y
index 724ffefcd7d9..f54f24a14a7c 100644
--- a/sbin/pfctl/parse.y
+++ b/sbin/pfctl/parse.y
@@ -326,6 +326,7 @@ static struct pool_opts {
int marker;
#define POM_TYPE 0x01
#define POM_STICKYADDRESS 0x02
+#define POM_ENDPI 0x04
u_int8_t opts;
int type;
int staticport;
@@ -512,7 +513,7 @@ int parseport(char *, struct range *r, int);
%token UPPERLIMIT QUEUE PRIORITY QLIMIT HOGS BUCKETS RTABLE TARGET INTERVAL
%token DNPIPE DNQUEUE RIDENTIFIER
%token LOAD RULESET_OPTIMIZATION PRIO
-%token STICKYADDRESS MAXSRCSTATES MAXSRCNODES SOURCETRACK GLOBAL RULE
+%token STICKYADDRESS ENDPI MAXSRCSTATES MAXSRCNODES SOURCETRACK GLOBAL RULE
%token MAXSRCCONN MAXSRCCONNRATE OVERLOAD FLUSH SLOPPY PFLOW
%token TAGGED TAG IFBOUND FLOATING STATEPOLICY STATEDEFAULTS ROUTE SETTOS
%token DIVERTTO DIVERTREPLY BRIDGE_TO
@@ -4593,6 +4594,14 @@ pool_opt : BITMASK {
pool_opts.marker |= POM_STICKYADDRESS;
pool_opts.opts |= PF_POOL_STICKYADDR;
}
+ | ENDPI {
+ if (pool_opts.marker & POM_ENDPI) {
+ yyerror("endpoint-independent cannot be redefined");
+ YYERROR;
+ }
+ pool_opts.marker |= POM_ENDPI;
+ pool_opts.opts |= PF_POOL_ENDPI;
+ }
| MAPEPORTSET number '/' number '/' number {
if (pool_opts.mape.offset) {
yyerror("map-e-portset cannot be redefined");
@@ -6299,6 +6308,7 @@ lookup(char *s)
{ "dnqueue", DNQUEUE},
{ "drop", DROP},
{ "dup-to", DUPTO},
+ { "endpoint-independent", ENDPI},
{ "ether", ETHER},
{ "fail-policy", FAILPOLICY},
{ "fairq", FAIRQ},
diff --git a/sbin/pfctl/pfctl_parser.c b/sbin/pfctl/pfctl_parser.c
index e71b7b160495..a9416534626b 100644
--- a/sbin/pfctl/pfctl_parser.c
+++ b/sbin/pfctl/pfctl_parser.c
@@ -488,6 +488,8 @@ print_pool(struct pfctl_pool *pool, u_int16_t p1, u_int16_t p2,
}
if (pool->opts & PF_POOL_STICKYADDR)
printf(" sticky-address");
+ if (pool->opts & PF_POOL_ENDPI)
+ printf(" endpoint-independent");
if (id == PF_NAT && p1 == 0 && p2 == 0)
printf(" static-port");
if (pool->mape.offset > 0)
diff --git a/sbin/pfctl/tests/files/pf1021.in b/sbin/pfctl/tests/files/pf1021.in
new file mode 100644
index 000000000000..841b024157c6
--- /dev/null
+++ b/sbin/pfctl/tests/files/pf1021.in
@@ -0,0 +1 @@
+nat on vtnet1 inet from ! (vtnet1) to any -> (vtnet1) endpoint-independent
diff --git a/sbin/pfctl/tests/files/pf1021.ok b/sbin/pfctl/tests/files/pf1021.ok
new file mode 100644
index 000000000000..3b5b84e2e11b
--- /dev/null
+++ b/sbin/pfctl/tests/files/pf1021.ok
@@ -0,0 +1 @@
+nat on vtnet1 inet from ! (vtnet1) to any -> (vtnet1) round-robin endpoint-independent
diff --git a/sbin/pfctl/tests/pfctl_test_list.inc b/sbin/pfctl/tests/pfctl_test_list.inc
index 5d1717200759..1fd31de6ccc4 100644
--- a/sbin/pfctl/tests/pfctl_test_list.inc
+++ b/sbin/pfctl/tests/pfctl_test_list.inc
@@ -129,3 +129,4 @@ PFCTL_TEST(1017, "Ethernet rule with ridentifier and several labels")
PFCTL_TEST(1018, "Test dynamic address mask")
PFCTL_TEST(1019, "Test pflow option")
PFCTL_TEST(1020, "Test hashmark and semicolon comment")
+PFCTL_TEST(1021, "Endpoint-independent")
diff --git a/secure/lib/libcrypto/modules/Makefile b/secure/lib/libcrypto/modules/Makefile
index 0e01eb3b8ef2..69a8470ff20b 100644
--- a/secure/lib/libcrypto/modules/Makefile
+++ b/secure/lib/libcrypto/modules/Makefile
@@ -1,4 +1,4 @@
-SUBDIR= fips legacy
+SUBDIR= legacy
SUBDIR_PARALLEL=
.include <bsd.subdir.mk>
diff --git a/secure/lib/libcrypto/modules/fips/Makefile b/secure/lib/libcrypto/modules/fips/Makefile
deleted file mode 100644
index 0f4889f3ff81..000000000000
--- a/secure/lib/libcrypto/modules/fips/Makefile
+++ /dev/null
@@ -1,340 +0,0 @@
-SHLIB_NAME?= fips.so
-
-CFLAGS+= -DFIPS_MODULE
-
-SRCS+= fips_entry.c fipsprov.c self_test.c self_test_kats.c
-
-.include "../../Makefile.common"
-
-# crypto
-SRCS+= provider_core.c provider_predefined.c \
- core_fetch.c core_algorithm.c core_namemap.c self_test_core.c
-
-SRCS+= cpuid.c ctype.c
-.if defined(ASM_aarch64)
-SRCS+= arm64cpuid.S armcap.c
-ACFLAGS.arm64cpuid.S= -march=armv8-a+crypto
-.elif defined(ASM_amd64)
-SRCS+= x86_64cpuid.S
-.elif defined(ASM_arm)
-SRCS+= armv4cpuid.S armcap.c
-.elif defined(ASM_i386)
-SRCS+= x86cpuid.S
-.elif defined(ASM_powerpc)
-SRCS+= ppccpuid.S ppccap.c
-.elif defined(ASM_powerpc64)
-SRCS+= ppccpuid.S ppccap.c
-.elif defined(ASM_powerpc64le)
-SRCS+= ppccpuid.S ppccap.c
-.else
-SRCS+= mem_clr.c
-.endif
-
-# crypto/aes
-SRCS+= aes_cfb.c aes_ecb.c aes_ige.c aes_misc.c aes_ofb.c aes_wrap.c
-.if defined(ASM_aarch64)
-SRCS+= aes_cbc.c aes_core.c aesv8-armx.S vpaes-armv8.S
-ACFLAGS.aesv8-armx.S= -march=armv8-a+crypto
-.elif defined(ASM_amd64)
-SRCS+= aes-x86_64.S aesni-mb-x86_64.S aesni-sha1-x86_64.S
-SRCS+= aesni-sha256-x86_64.S aesni-x86_64.S bsaes-x86_64.S vpaes-x86_64.S
-.elif defined(ASM_arm)
-SRCS+= aes_cbc.c aes-armv4.S aesv8-armx.S bsaes-armv7.S
-.elif defined(ASM_i386)
-SRCS+= aes-586.S aesni-x86.S vpaes-x86.S
-.elif defined(ASM_powerpc)
-SRCS+= aes_cbc.c aes_core.c aes-ppc.S vpaes-ppc.S aesp8-ppc.S
-.elif defined(ASM_powerpc64)
-SRCS+= aes_cbc.c aes_core.c aes-ppc.S vpaes-ppc.S aesp8-ppc.S
-.elif defined(ASM_powerpc64le)
-SRCS+= aes_cbc.c aes_core.c aes-ppc.S vpaes-ppc.S aesp8-ppc.S
-.else
-SRCS+= aes_cbc.c aes_core.c
-.endif
-
-# crypto/bn
-SRCS+= bn_add.c bn_div.c bn_exp.c bn_lib.c bn_ctx.c bn_mul.c \
- bn_mod.c bn_conv.c bn_rand.c bn_shift.c bn_word.c bn_blind.c \
- bn_kron.c bn_sqrt.c bn_gcd.c bn_prime.c bn_sqr.c \
- bn_recp.c bn_mont.c bn_mpi.c bn_exp2.c bn_gf2m.c bn_nist.c \
- bn_intern.c bn_dh.c bn_rsa_fips186_4.c bn_const.c
-.if defined(ASM_aarch64)
-SRCS+= armv8-mont.S bn_asm.c
-.elif defined(ASM_amd64)
-SRCS+= rsaz-avx2.S rsaz-avx512.S rsaz-x86_64.S rsaz_exp.c rsaz_exp_x2.c
-SRCS+= x86_64-gcc.c x86_64-gf2m.S x86_64-mont.S x86_64-mont5.S
-.elif defined(ASM_arm)
-SRCS+= armv4-gf2m.S armv4-mont.S bn_asm.c
-.elif defined(ASM_i386)
-SRCS+= bn-586.S co-586.S x86-gf2m.S x86-mont.S
-.elif defined(ASM_powerpc)
-SRCS+= bn_ppc.c bn-ppc.S ppc-mont.S
-.elif defined(ASM_powerpc64)
-SRCS+= bn_ppc.c bn-ppc.S ppc-mont.S
-.elif defined(ASM_powerpc64le)
-SRCS+= bn_ppc.c bn-ppc.S ppc-mont.S
-.else
-SRCS+= bn_asm.c
-.endif
-
-# crypto/buffer
-SRCS+= buffer.c
-
-# crypto/cmac
-SRCS+= cmac.c
-
-# crypto/des
-SRCS+= set_key.c ecb3_enc.c
-.if defined(ASM_i386)
-SRCS+= crypt586.S des-586.S
-.else
-SRCS+= des_enc.c fcrypt_b.c
-.endif
-
-# crypto/dh
-SRCS+= dh_lib.c dh_key.c dh_group_params.c dh_check.c dh_backend.c dh_gen.c \
- dh_kdf.c
-
-# crypto/dsa
-SRCS+= dsa_sign.c dsa_vrf.c dsa_lib.c dsa_ossl.c dsa_check.c \
- dsa_key.c dsa_backend.c dsa_gen.c
-
-# crypto/ec
-SRCS+= ec_lib.c ecp_smpl.c ecp_mont.c ecp_nist.c ec_cvt.c ec_mult.c \
- ec_curve.c ec_check.c ec_key.c ec_kmeth.c ecx_key.c ec_asn1.c \
- ec2_smpl.c \
- ecp_oct.c ec2_oct.c ec_oct.c ecdh_ossl.c \
- ecdsa_ossl.c ecdsa_sign.c ecdsa_vrf.c curve25519.c \
- curve448/f_generic.c curve448/scalar.c \
- curve448/curve448_tables.c curve448/eddsa.c curve448/curve448.c \
- ec_backend.c ecx_backend.c ecdh_kdf.c curve448/arch_64/f_impl64.c \
- curve448/arch_32/f_impl32.c
-SRCS+= cryptlib.c params.c params_from_text.c bsearch.c ex_data.c o_str.c \
- threads_pthread.c threads_none.c initthread.c \
- context.c sparse_array.c asn1_dsa.c packet.c param_build.c \
- param_build_set.c der_writer.c threads_lib.c params_dup.c
-
-.include
-.if ${MACHINE_ABI:Mlittle-endian} && ${MACHINE_ABI:Mlong64}
-SRCS+= ecp_nistp224.c ecp_nistp256.c ecp_nistp521.c ecp_nistputil.c
-.endif
-.if defined(ASM_aarch64)
-SRCS+= ecp_nistz256-armv8.S ecp_nistz256.c
-.elif defined(ASM_amd64)
-SRCS+= ecp_nistz256-x86_64.S ecp_nistz256.c x25519-x86_64.S
-.elif defined(ASM_arm)
-SRCS+= ecp_nistz256-armv4.S ecp_nistz256.c
-.elif defined(ASM_i386)
-SRCS+= ecp_nistz256-x86.S ecp_nistz256.c
-.elif defined(ASM_powerpc64)
-SRCS+= ecp_nistp521-ppc64.S ecp_nistz256-ppc64.S ecp_nistz256.c ecp_ppc.c x25519-ppc64.S
-.elif defined(ASM_powerpc64le)
-SRCS+= ecp_nistp521-ppc64.S ecp_nistz256-ppc64.S ecp_nistz256.c ecp_ppc.c x25519-ppc64.S
-.endif
-
-# crypto/evp
-SRCS+= digest.c evp_enc.c evp_lib.c evp_fetch.c evp_utils.c \
- mac_lib.c mac_meth.c keymgmt_meth.c keymgmt_lib.c kdf_lib.c kdf_meth.c \
- m_sigver.c pmeth_lib.c signature.c p_lib.c pmeth_gn.c exchange.c \
- evp_rand.c asymcipher.c kem.c dh_support.c ec_support.c pmeth_check.c
-
-# crypto/ffc
-SRCS+= ffc_params.c ffc_params_generate.c ffc_key_generate.c \
- ffc_params_validate.c ffc_key_validate.c ffc_backend.c \
- ffc_dh.c
-
-# crypto/hmac
-SRCS+= hmac.c
-
-# crypto/lhash
-SRCS+= lhash.c
-
-# crypto/modes
-SRCS+= cbc128.c ctr128.c cfb128.c ofb128.c gcm128.c ccm128.c xts128.c
-SRCS+= wrap128.c
-.if defined(ASM_aarch64)
-SRCS+= ghashv8-armx.S aes-gcm-armv8_64.S
-ACFLAGS.ghashv8-armx.S= -march=armv8-a+crypto
-.elif defined(ASM_amd64)
-SRCS+= aesni-gcm-x86_64.S ghash-x86_64.S
-.elif defined(ASM_arm)
-SRCS+= ghash-armv4.S ghashv8-armx.S
-.elif defined(ASM_i386)
-SRCS+= ghash-x86.S
-.elif defined(ASM_powerpc)
-SRCS+= ghashp8-ppc.S
-.elif defined(ASM_powerpc64)
-SRCS+= ghashp8-ppc.S
-.elif defined(ASM_powerpc64le)
-SRCS+= ghashp8-ppc.S
-.endif
-
-# crypto/property
-SRCS+= property_string.c property_parse.c property_query.c property.c defn_cache.c
-
-# crypto/rand
-SRCS+= rand_lib.c
-
-# crypto/rsa
-SRCS+= rsa_ossl.c rsa_gen.c rsa_lib.c rsa_sign.c rsa_pk1.c \
- rsa_none.c rsa_oaep.c rsa_chk.c rsa_pss.c rsa_x931.c rsa_crpt.c \
- rsa_sp800_56b_gen.c rsa_sp800_56b_check.c rsa_backend.c \
- rsa_mp_names.c rsa_schemes.c
-SRCS+= rsa_acvp_test_params.c
-
-# crypto/sha
-SRCS+= sha1dgst.c sha256.c sha512.c sha3.c
-.if defined(ASM_aarch64)
-SRCS+= keccak1600-armv8.S sha1-armv8.S sha256-armv8.S sha512-armv8.S
-.elif defined(ASM_amd64)
-SRCS+= keccak1600-x86_64.S sha1-mb-x86_64.S sha1-x86_64.S
-SRCS+= sha256-mb-x86_64.S sha256-x86_64.S sha512-x86_64.S
-.elif defined(ASM_arm)
-SRCS+= keccak1600-armv4.S sha1-armv4-large.S sha256-armv4.S sha512-armv4.S
-.elif defined(ASM_i386)
-SRCS+= keccak1600.c sha1-586.S sha256-586.S sha512-586.S
-.elif defined(ASM_powerpc)
-SRCS+= keccak1600.c sha_ppc.c sha1-ppc.S sha256-ppc.S sha512-ppc.S sha256p8-ppc.S sha512p8-ppc.S
-.elif defined(ASM_powerpc64)
-SRCS+= keccak1600-ppc64.S sha_ppc.c sha1-ppc.S sha256-ppc.S sha512-ppc.S sha256p8-ppc.S sha512p8-ppc.S
-.elif defined(ASM_powerpc64le)
-SRCS+= keccak1600-ppc64.S sha_ppc.c sha1-ppc.S sha256-ppc.S sha512-ppc.S sha256p8-ppc.S sha512p8-ppc.S
-.else
-SRCS+= keccak1600.c
-.endif
-
-# crypto/stack
-SRCS+= stack.c
-
-# common
-SRCS+= capabilities.c bio_prov.c digest_to_nid.c \
- securitycheck.c provider_seeding.c
-SRCS+= securitycheck_fips.c
-
-# common/der
-SRCS+= der_rsa_gen.c der_rsa_key.c
-SRCS+= der_rsa_sig.c
-
-SRCS+= der_dsa_gen.c der_dsa_key.c
-SRCS+= der_dsa_sig.c
-
-SRCS+= der_ec_gen.c der_ec_key.c
-SRCS+= der_ec_sig.c
-
-SRCS+= der_ecx_gen.c der_ecx_key.c
-
-SRCS+= der_wrap_gen.c
-
-# asymciphers
-SRCS+= rsa_enc.c
-
-# ciphers
-SRCS+= ciphercommon.c ciphercommon_hw.c ciphercommon_block.c \
- ciphercommon_gcm.c ciphercommon_gcm_hw.c \
- ciphercommon_ccm.c ciphercommon_ccm_hw.c
-SRCS+= cipher_aes.c cipher_aes_hw.c \
- cipher_aes_xts.c cipher_aes_xts_hw.c \
- cipher_aes_gcm.c cipher_aes_gcm_hw.c \
- cipher_aes_ccm.c cipher_aes_ccm_hw.c \
- cipher_aes_wrp.c \
- cipher_aes_cbc_hmac_sha.c \
- cipher_aes_cbc_hmac_sha256_hw.c cipher_aes_cbc_hmac_sha1_hw.c \
- cipher_cts.c
-SRCS+= cipher_aes_xts_fips.c
-SRCS+= cipher_tdes.c cipher_tdes_common.c cipher_tdes_hw.c
-
-# digests
-SRCS+= digestcommon.c
-SRCS+= sha2_prov.c
-SRCS+= sha3_prov.c
-
-# exchange
-SRCS+= dh_exch.c
-SRCS+= ecx_exch.c
-SRCS+= ecdh_exch.c
-SRCS+= kdf_exch.c
-
-# kdfs
-SRCS+= tls1_prf.c
-SRCS+= hkdf.c
-SRCS+= kbkdf.c
-SRCS+= pbkdf2.c
-SRCS+= pbkdf2_fips.c
-SRCS+= sskdf.c
-SRCS+= sshkdf.c
-SRCS+= x942kdf.c
-
-# kem
-SRCS+= rsa_kem.c
-
-# keymgmt
-SRCS+= dh_kmgmt.c
-SRCS+= dsa_kmgmt.c
-SRCS+= ec_kmgmt.c
-SRCS+= ecx_kmgmt.c
-SRCS+= kdf_legacy_kmgmt.c
-SRCS+= mac_legacy_kmgmt.c
-SRCS+= rsa_kmgmt.c
-
-# macs
-SRCS+= gmac_prov.c
-SRCS+= hmac_prov.c
-SRCS+= kmac_prov.c
-SRCS+= cmac_prov.c
-
-# rands
-SRCS+= drbg.c test_rng.c drbg_ctr.c drbg_hash.c drbg_hmac.c crngt.c
-
-# signature
-SRCS+= dsa_sig.c
-SRCS+= eddsa_sig.c ecdsa_sig.c
-SRCS+= mac_legacy_sig.c
-SRCS+= rsa_sig.c
-
-# ssl
-SRCS+= record/tls_pad.c s3_cbc.c
-
-.include
-
-.if defined(ASM_${MACHINE_CPUARCH})
-.PATH: ${SRCTOP}/sys/crypto/openssl/${MACHINE_CPUARCH}
-.if defined(ASM_amd64)
-.PATH: ${LCRYPTO_SRC}/crypto/bn/asm
-.endif
-.elif defined(ASM_${MACHINE_ARCH})
-.PATH: ${SRCTOP}/sys/crypto/openssl/${MACHINE_ARCH}
-.endif
-
-.PATH: ${LCRYPTO_SRC}/crypto \
- ${LCRYPTO_SRC}/crypto/aes \
- ${LCRYPTO_SRC}/crypto/bio \
- ${LCRYPTO_SRC}/crypto/bn \
- ${LCRYPTO_SRC}/crypto/buffer \
- ${LCRYPTO_SRC}/crypto/cmac \
- ${LCRYPTO_SRC}/crypto/des \
- ${LCRYPTO_SRC}/crypto/dh \
- ${LCRYPTO_SRC}/crypto/dsa \
- ${LCRYPTO_SRC}/crypto/ec \
- ${LCRYPTO_SRC}/crypto/evp \
- ${LCRYPTO_SRC}/crypto/ffc \
- ${LCRYPTO_SRC}/crypto/hmac \
- ${LCRYPTO_SRC}/crypto/lhash \
- ${LCRYPTO_SRC}/crypto/modes \
- ${LCRYPTO_SRC}/crypto/property \
- ${LCRYPTO_SRC}/crypto/rand \
- ${LCRYPTO_SRC}/crypto/rsa \
- ${LCRYPTO_SRC}/crypto/sha \
- ${LCRYPTO_SRC}/crypto/stack \
- ${LCRYPTO_SRC}/providers/fips \
- ${LCRYPTO_SRC}/providers/common/der \
- ${LCRYPTO_SRC}/providers/implementations/asymciphers \
- ${LCRYPTO_SRC}/providers/implementations/ciphers \
- ${LCRYPTO_SRC}/providers/implementations/digests \
- ${LCRYPTO_SRC}/providers/implementations/exchange \
- ${LCRYPTO_SRC}/providers/implementations/kdfs \
- ${LCRYPTO_SRC}/providers/implementations/kem \
- ${LCRYPTO_SRC}/providers/implementations/keymgmt \
- ${LCRYPTO_SRC}/providers/implementations/macs \
- ${LCRYPTO_SRC}/providers/implementations/rands \
- ${LCRYPTO_SRC}/providers/implementations/signature \
- ${LCRYPTO_SRC}/ssl
diff --git a/share/man/man4/acpi_ged.4 b/share/man/man4/acpi_ged.4
index c87c7b3e97c9..98baabdde796 100644
--- a/share/man/man4/acpi_ged.4
+++ b/share/man/man4/acpi_ged.4
@@ -53,7 +53,7 @@ This may generate optionally ACPI notify for another device.
The
.Nm
device driver first appeared in
-.Fx 14.0 .
+.Fx 13.3 .
.Sh AUTHORS
.An -nosplit
The
diff --git a/share/man/man4/gve.4 b/share/man/man4/gve.4
index 54e59b86108b..95c125507bd5 100644
--- a/share/man/man4/gve.4
+++ b/share/man/man4/gve.4
@@ -208,7 +208,7 @@ Please email gvnic-drivers@google.com with the specifics of the issue encountere
The
.Nm
device driver first appeared in
-.Fx 14.0 .
+.Fx 13.3 .
.Sh AUTHORS
The
.Nm
diff --git a/share/man/man4/pf.4 b/share/man/man4/pf.4
index 645f31e6e395..3855d07faead 100644
--- a/share/man/man4/pf.4
+++ b/share/man/man4/pf.4
@@ -26,7 +26,7 @@
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.Dd October 20, 2023
+.Dd September 6, 2024
.Dt PF 4
.Os
.Sh NAME
@@ -80,17 +80,30 @@ The following
tunables are available.
.Bl -tag -width indent
.It Va net.pf.states_hashsize
-Size of hash tables that store states.
+Size of hash table that stores states.
Should be power of 2.
Default value is 131072.
.It Va net.pf.source_nodes_hashsize
-Size of hash table that store source nodes.
+Size of hash table that stores source nodes.
+Should be power of 2.
+Default value is 32768.
+.It Va net.pf.rule_tag_hashsize
+Size of the hash table that stores tags.
+.It Va net.pf.udpendpoint_hashsize
+Size of hash table that stores UDP endpoint mappings.
Should be power of 2.
Default value is 32768.
.It Va net.pf.default_to_drop
This value overrides
.Cd "options PF_DEFAULT_TO_DROP"
from kernel configuration file.
+.It Va net.pf.filter_local
+This tells
+.Nm
+to also filter on the loopback output hook.
+This is typically used to allow redirect rules to adjust the source address.
+.It Va net.pf.request_maxcount
+The maximum number of items in a single ioctl call.
.El
.Pp
Read only
diff --git a/share/man/man4/ure.4 b/share/man/man4/ure.4
index 942764109dd8..f5452fa8463b 100644
--- a/share/man/man4/ure.4
+++ b/share/man/man4/ure.4
@@ -1,3 +1,5 @@
+.\"-
+.\" SPDX-License-Identifier: BSD-2-Clause
.\"
.\" Copyright (c) 2015-2016 Kevin Lo
.\" All rights reserved.
@@ -23,12 +25,12 @@
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.Dd May 3, 2024
+.Dd September 3, 2024
.Dt URE 4
.Os
.Sh NAME
.Nm ure
-.Nd "RealTek RTL8152/RTL8153/RTL8153B/RTL8156/RTL8156B USB to Ethernet controller driver"
+.Nd RealTek RTL8152/RTL8153/RTL8156 USB Ethernet driver
.Sh SYNOPSIS
To compile this driver into the kernel,
place the following lines in your
@@ -52,15 +54,20 @@ if_ure_load="YES"
The
.Nm
driver provides support for USB Ethernet adapters based on the RealTek
-RealTek RTL8152 and RTL8153 USB Ethernet controllers.
+RTL8152, RTL8153/RTL8153B, and RTL8156/RTL8156B USB Ethernet controllers,
+as well as USB Ethernet PHYs provided by
+.Xr rgephy 4 .
.Pp
-NICs based on the RTL8152 are capable of 10 and 100Mbps speeds.
-NICs based on the RTL8153 are capable of 10, 100 and 1000Mbps operation.
+NICs based on the RTL8152 are capable of 10 and 100Mbps.
+NICs based on the RTL8153 or provided by
+.Xr rgephy 4
+are capable of 10, 100, and 1000Mbps.
+NICs based on the RTL8156 are capable of 10, 100, 1000, and 2500Mbps operation.
.Pp
The
.Nm
driver supports the following media types:
-.Bl -tag -width ".Cm 10baseT/UTP"
+.Bl -tag -width "10baseT/UTP"
.It Cm autoselect
Enable auto selection of the media type and options.
The user can manually override
@@ -86,7 +93,7 @@ or
.Cm half-duplex
modes.
.It Cm 1000baseTX
-Set 1000baseTX operation over twisted pair.
+Set 1000baseTX (Gigabit Ethernet) operation over twisted pair.
The RealTek gigE chips support 1000Mbps in
.Cm full-duplex
mode only.
@@ -99,12 +106,12 @@ mode only.
.Pp
The
.Nm
-driver supports the following media options:
-.Bl -tag -width ".Cm full-duplex"
+driver supports the following media options for 10/100 operation:
+.Bl -tag -width "full-duplex"
.It Cm full-duplex
-Force full duplex operation.
+Force full-duplex operation.
.It Cm half-duplex
-Force half duplex operation.
+Force half-duplex operation.
.El
.Pp
For more information on configuring this device, see
diff --git a/share/man/man4/wsp.4 b/share/man/man4/wsp.4
index 39660a53ee9a..83a4421fa2ff 100644
--- a/share/man/man4/wsp.4
+++ b/share/man/man4/wsp.4
@@ -65,6 +65,17 @@ Pointer sensitivity can be controlled using the sysctl tunable
Tap to left-click can be controlled using the sysctl tunable
.Nm hw.usb.wsp.enable_single_tap_clicks ,
set to 0 to disable single tap clicks or 1 to enable them (default).
+Movement on the trackpad following a partially-released click can be
+controlled using the sysctl tunable
+.Nm hw.usb.wsp.enable_single_tap_movement ,
+set to 0 to disable the movement on the trackpad until a full release
+or 1 to allow the continued movement (default).
+.Nm hw.usb.wsp.max_finger_area
+defines the maximum area on the trackpad which is registered as a
+finger (lower for greater palm detection).
+.Nm hw.usb.wsp.max_double_tap_distance
+defines the maximum distance between two finger clicks or taps which may
+register as a double-click.
Z-Axis sensitivity can be controlled using the sysctl tunable
.Nm hw.usb.wsp.z_factor .
Z-Axis inversion can be controlled using the sysctl tunable
diff --git a/share/man/man5/pf.conf.5 b/share/man/man5/pf.conf.5
index f04b0799741e..5aa936d509ed 100644
--- a/share/man/man5/pf.conf.5
+++ b/share/man/man5/pf.conf.5
@@ -27,7 +27,7 @@
.\" ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
.\" POSSIBILITY OF SUCH DAMAGE.
.\"
-.Dd June 24, 2024
+.Dd September 4, 2024
.Dt PF.CONF 5
.Os
.Sh NAME
@@ -2278,6 +2278,16 @@ from modifying the source port on TCP and UDP packets.
With
.Ar nat
rules, the
+.It Ar endpoint-independent
+With
+.Ar nat
+rules, the
+.Ar endpoint-independent
+option causes
+.Xr pf 4
+to always map connections from a UDP source address and port to the same
+NAT address and port.
+This feature implements "full-cone" NAT behavior.
.Ar map-e-portset
option enables the source port translation of MAP-E (RFC 7597) Customer Edge.
In order to make the host act as a MAP-E Customer Edge, setting up a tunneling
diff --git a/share/man/man8/diskless.8 b/share/man/man8/diskless.8
index 8839e27e11ba..cc49854ae850 100644
--- a/share/man/man8/diskless.8
+++ b/share/man/man8/diskless.8
@@ -1,3 +1,6 @@
+.\"-
+.\" SPDX-License-Identifier: BSD-3-Clause
+.\"
.\" Copyright (c) 1994 Gordon W. Ross, Theo de Raadt
.\" Updated by Luigi Rizzo, Robert Watson
.\" All rights reserved.
@@ -24,12 +27,12 @@
.\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
-.Dd May 3, 2020
+.Dd August 11, 2024
.Dt DISKLESS 8
.Os
.Sh NAME
.Nm diskless
-.Nd booting a system over the network
+.Nd booting a system over the network with PXE
.Sh DESCRIPTION
The ability to boot a machine over the network is useful for
.Em diskless
diff --git a/stand/efi/Makefile.inc b/stand/efi/Makefile.inc
index d670024cd056..2c9bad14dd20 100644
--- a/stand/efi/Makefile.inc
+++ b/stand/efi/Makefile.inc
@@ -25,8 +25,8 @@ EFI_TARGET= binary
# XXX: doesn't work with llvm-objcopy!
OBJCOPY=${WORLDTMP}/usr/bin/objcopy
-# Arbitrarily set the PE/COFF header timestamps to 1 Jan 2016 00:00:00
+# Arbitrarily set the PE/COFF header timestamps to 1 Jan 2024 00:00:00
# for build reproducibility.
-SOURCE_DATE_EPOCH?=1451606400
+SOURCE_DATE_EPOCH?=1704067200
.include "../Makefile.inc"
diff --git a/stand/man/loader.efi.8 b/stand/man/loader.efi.8
index a0c0df9a9c35..3527d8b66a99 100644
--- a/stand/man/loader.efi.8
+++ b/stand/man/loader.efi.8
@@ -30,7 +30,7 @@
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.Dd August 20, 2024
+.Dd September 3, 2024
.Dt LOADER.EFI 8
.Os
.Sh NAME
@@ -450,9 +450,6 @@ copy the loader to the default location:
# cp /boot/loader.efi /boot/efi/EFI/BOOT/BOOTX64.EFI
.Ed
.Pp
-.Bd -literal -offset indent
-# umount /boot/efi
-.Ed
Finally, if you mounted the ESP, you may wish to unmount it.
.Bd -literal -offset indent
# umount /boot/efi
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index 025c3c365de5..f4b3b9702e00 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -1382,7 +1382,7 @@ hammer_time(u_int64_t modulep, u_int64_t physfree)
*/
for (x = 0; x < NGDT; x++) {
if (x != GPROC0_SEL && x != (GPROC0_SEL + 1) &&
- x != GUSERLDT_SEL && x != (GUSERLDT_SEL) + 1)
+ x != GUSERLDT_SEL && x != (GUSERLDT_SEL + 1))
ssdtosd(&gdt_segs[x], &gdt[x]);
}
gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&pc->pc_common_tss;
diff --git a/sys/amd64/include/vmm_dev.h b/sys/amd64/include/vmm_dev.h
index 5f347e46b9c4..b77b0ef5d996 100644
--- a/sys/amd64/include/vmm_dev.h
+++ b/sys/amd64/include/vmm_dev.h
@@ -29,7 +29,8 @@
#ifndef _VMM_DEV_H_
#define _VMM_DEV_H_
-struct vm_snapshot_meta;
+#include <machine/vmm.h>
+#include <machine/vmm_snapshot.h>
struct vm_memmap {
vm_paddr_t gpa;
diff --git a/sys/arm/freescale/imx/imx_gpio.c b/sys/arm/freescale/imx/imx_gpio.c
index c5e92992a36b..7610d28af90e 100644
--- a/sys/arm/freescale/imx/imx_gpio.c
+++ b/sys/arm/freescale/imx/imx_gpio.c
@@ -134,6 +134,7 @@ static struct ofw_compat_data compat_data[] = {
{"fsl,imx6q-gpio", 1},
{"fsl,imx53-gpio", 1},
{"fsl,imx51-gpio", 1},
+ {"fsl,imx35-gpio", 1},
{NULL, 0}
};
diff --git a/sys/arm/ti/ti_pruss.c b/sys/arm/ti/ti_pruss.c
index 85d075419fe8..b7a04f2cfb42 100644
--- a/sys/arm/ti/ti_pruss.c
+++ b/sys/arm/ti/ti_pruss.c
@@ -184,9 +184,6 @@ ti_pruss_irq_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
sc = dev->si_drv1;
irqs = malloc(sizeof(struct ctl), M_DEVBUF, M_WAITOK);
- if (!irqs)
- return (ENOMEM);
-
irqs->cnt = sc->tstamps.ctl.cnt;
irqs->idx = sc->tstamps.ctl.idx;
diff --git a/sys/arm64/arm64/cmn600.c b/sys/arm64/arm64/cmn600.c
index 4e3be8fee40e..530cdcdc3d06 100644
--- a/sys/arm64/arm64/cmn600.c
+++ b/sys/arm64/arm64/cmn600.c
@@ -332,9 +332,6 @@ cmn600_create_node(struct cmn600_softc *sc, off_t node_offset,
int i;
node = malloc(sizeof(struct cmn600_node), M_DEVBUF, M_WAITOK);
- if (node == NULL)
- return (NULL);
-
node->sc = sc;
node->nd_offset = node_offset;
node->nd_parent = parent;
@@ -399,8 +396,6 @@ cmn600_create_node(struct cmn600_softc *sc, off_t node_offset,
node->nd_children = (struct cmn600_node **)mallocarray(
node->nd_child_count, sizeof(struct cmn600_node *), M_DEVBUF,
M_WAITOK);
- if (node->nd_children == NULL)
- goto FAIL;
for (i = 0; i < node->nd_child_count; i++) {
val = node->nd_read8(node, child_offset + (i * 8));
node->nd_children[i] = cmn600_create_node(sc, val &
@@ -420,9 +415,6 @@ cmn600_create_node(struct cmn600_softc *sc, off_t node_offset,
break;
}
return (node);
-FAIL:
- free(node, M_DEVBUF);
- return (NULL);
}
static void
diff --git a/sys/arm64/arm64/efirt_machdep.c b/sys/arm64/arm64/efirt_machdep.c
index 5790d722a8da..aa051c88815b 100644
--- a/sys/arm64/arm64/efirt_machdep.c
+++ b/sys/arm64/arm64/efirt_machdep.c
@@ -215,7 +215,7 @@ efi_create_1t1_map(struct efi_md *map, int ndesc, int descsz)
p->md_phys, mode, p->md_pages);
}
- l3_attr = ATTR_DEFAULT | ATTR_S1_IDX(mode) |
+ l3_attr = ATTR_AF | pmap_sh_attr | ATTR_S1_IDX(mode) |
ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_nG | L3_PAGE;
if (mode == VM_MEMATTR_DEVICE || p->md_attr & EFI_MD_ATTR_XP)
l3_attr |= ATTR_S1_XN;
diff --git a/sys/arm64/arm64/locore.S b/sys/arm64/arm64/locore.S
index feb6c7963b58..ef88ae48ff52 100644
--- a/sys/arm64/arm64/locore.S
+++ b/sys/arm64/arm64/locore.S
@@ -224,6 +224,7 @@ ENTRY(_start)
* x27 = TTBR0 table phys addr
* x26 = Kernel L1 table phys addr
* x24 = TTBR1 table phys addr
+ * x22 = PTE shareability attributes
*/
/* Enable the mmu */
@@ -303,6 +304,16 @@ virtdone:
str x27, [PTR(0), #BP_KERN_TTBR0]
str x23, [PTR(0), #BP_BOOT_EL]
+ /* Set this before it's used in kasan_init_early */
+#ifdef __CHERI_PURE_CAPABILITY__
+ adrp c1, :got:pmap_sh_attr
+ ldr c1, [c1, :got_lo12:pmap_sh_attr]
+ str x22, [c1]
+#else
+ adrp x1, pmap_sh_attr
+ str x22, [x1, :lo12:pmap_sh_attr]
+#endif
+
#ifdef KASAN
/* Save bootparams */
mov x19, x0
@@ -725,6 +736,30 @@ LENTRY(create_pagetables)
cmp PTR(6), PTR(27)
b.lo 1b
+ /*
+ * Find the shareability attribute we should use. If FEAT_LPA2 is
+ * enabled then the shareability field is moved from the page table
+ * to tcr_el1 and the bits in the page table are reused by the
+ * address field.
+ */
+#if PAGE_SIZE == PAGE_SIZE_4K
+#define LPA2_MASK ID_AA64MMFR0_TGran4_MASK
+#define LPA2_VAL ID_AA64MMFR0_TGran4_LPA2
+#elif PAGE_SIZE == PAGE_SIZE_16K
+#define LPA2_MASK ID_AA64MMFR0_TGran16_MASK
+#define LPA2_VAL ID_AA64MMFR0_TGran16_LPA2
+#else
+#error Unsupported page size
+#endif
+ mrs x6, id_aa64mmfr0_el1
+ mov x7, LPA2_VAL
+ and x6, x6, LPA2_MASK
+ cmp x6, x7
+ ldr x22, =(ATTR_SH(ATTR_SH_IS))
+ csel x22, xzr, x22, eq
+#undef LPA2_MASK
+#undef LPA2_VAL
+
/*
* Build the TTBR1 maps.
*/
@@ -1013,11 +1048,13 @@ LENTRY(build_l2_block_pagetable)
orr x12, x12, #(ATTR_LC_ENABLED)
orr x12, x12, #(ATTR_CDBM | ATTR_SC)
#endif
- orr x12, x12, #(ATTR_DEFAULT)
+ orr x12, x12, #(ATTR_AF)
orr x12, x12, #(ATTR_S1_UXN)
#ifdef __ARM_FEATURE_BTI_DEFAULT
orr x12, x12, #(ATTR_S1_GP)
#endif
+ /* Set the shareability attribute */
+ orr x12, x12, x22
/* Only use the output address bits */
lsr x9, x9, #L2_SHIFT
@@ -1092,11 +1129,13 @@ LENTRY(build_l3_page_pagetable)
#if __has_feature(capabilities)
orr x12, x12, #(ATTR_CAP_RW)
#endif
- orr x12, x12, #(ATTR_DEFAULT)
+ orr x12, x12, #(ATTR_AF)
orr x12, x12, #(ATTR_S1_UXN)
#ifdef __ARM_FEATURE_BTI_DEFAULT
orr x12, x12, #(ATTR_S1_GP)
#endif
+ /* Set the shareability attribute */
+ orr x12, x12, x22
/* Only use the output address bits */
lsr x9, x9, #L3_SHIFT
@@ -1166,6 +1205,13 @@ LENTRY(start_mmu)
* to 1 only if the ASIDBits field equals 0b0010.
*/
ldr x2, tcr
+
+ /* If x22 contains a non-zero value then LPA2 is not implemented */
+ cbnz x22, .Lno_lpa2
+ ldr x3, =(TCR_DS)
+ orr x2, x2, x3
+.Lno_lpa2:
+
mrs x3, id_aa64mmfr0_el1
/* Copy the bottom 3 bits from id_aa64mmfr0_el1 into TCR.IPS */
@@ -1231,8 +1277,9 @@ tcr:
#define TCR_MORELLO 0
#endif
- .quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_TG | \
- TCR_CACHE_ATTRS | TCR_SMP_ATTRS | TCR_MORELLO)
+ .quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_TG | \
+ TCR_SH1_IS | TCR_ORGN1_WBWA | TCR_IRGN1_WBWA | \
+ TCR_SH0_IS | TCR_ORGN0_WBWA | TCR_IRGN0_WBWA | TCR_MORELLO)
sctlr_set:
/* Bits to set */
.quad (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_UCI | SCTLR_SPAN | \
diff --git a/sys/arm64/arm64/minidump_machdep.c b/sys/arm64/arm64/minidump_machdep.c
index ec0ba0d405a9..4510fc10d764 100644
--- a/sys/arm64/arm64/minidump_machdep.c
+++ b/sys/arm64/arm64/minidump_machdep.c
@@ -314,8 +314,8 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
for (i = 0; i < Ln_ENTRIES; i++) {
for (j = 0; j < Ln_ENTRIES; j++) {
tmpbuffer[j] = (pa + i * L2_SIZE +
- j * PAGE_SIZE) | ATTR_DEFAULT |
- L3_PAGE;
+ j * PAGE_SIZE) | ATTR_AF |
+ pmap_sh_attr | L3_PAGE;
}
error = blk_write(di, (char *)&tmpbuffer, 0,
PAGE_SIZE);
@@ -334,7 +334,7 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
/* Generate fake l3 entries based upon the l1 entry */
for (i = 0; i < Ln_ENTRIES; i++) {
tmpbuffer[i] = (pa + i * PAGE_SIZE) |
- ATTR_DEFAULT | L3_PAGE;
+ ATTR_AF | pmap_sh_attr | L3_PAGE;
}
error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
if (error)
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index bafa13148e21..7970f375d9c6 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -189,8 +189,8 @@
#else
#define ATTR_KERN_GP 0
#endif
-#define PMAP_SAN_PTE_BITS (ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP | \
- ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_AP(ATTR_S1_AP_RW))
+#define PMAP_SAN_PTE_BITS (ATTR_AF | ATTR_S1_XN | pmap_sh_attr | \
+ ATTR_KERN_GP | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_AP(ATTR_S1_AP_RW))
struct pmap_large_md_page {
struct rwlock pv_lock;
@@ -366,6 +366,8 @@ static u_int physmap_idx;
static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"VM/pmap parameters");
+pt_entry_t pmap_sh_attr __read_mostly = ATTR_SH(ATTR_SH_IS);
+
#if PAGE_SIZE == PAGE_SIZE_4K
#define L1_BLOCKS_SUPPORTED 1
#else
@@ -1266,7 +1268,7 @@ pmap_bootstrap_l2_block(struct pmap_bootstrap_state *state, int i)
MPASS((state->pa & L2_OFFSET) == 0);
MPASS(state->l2[l2_slot] == 0);
pmap_store(&state->l2[l2_slot], PHYS_TO_PTE(state->pa) |
- ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP |
+ ATTR_AF | pmap_sh_attr | ATTR_S1_XN | ATTR_KERN_GP |
#if __has_feature(capabilities)
ATTR_CAP_RW |
#endif
@@ -1319,7 +1321,7 @@ pmap_bootstrap_l3_page(struct pmap_bootstrap_state *state, int i)
MPASS((state->pa & L3_OFFSET) == 0);
MPASS(state->l3[l3_slot] == 0);
pmap_store(&state->l3[l3_slot], PHYS_TO_PTE(state->pa) |
- ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP |
+ ATTR_AF | pmap_sh_attr | ATTR_S1_XN | ATTR_KERN_GP |
#if __has_feature(capabilities)
ATTR_CAP_RW |
#endif
@@ -1364,7 +1366,8 @@ pmap_bootstrap_dmap(void)
MPASS((bs_state.pa & L1_OFFSET) == 0);
pmap_store(
&bs_state.l1[pmap_l1_index(bs_state.va)],
- PHYS_TO_PTE(bs_state.pa) | ATTR_DEFAULT |
+ PHYS_TO_PTE(bs_state.pa) | ATTR_AF |
+ pmap_sh_attr |
#if __has_feature(capabilities)
ATTR_CAP_RW |
#endif
@@ -2272,8 +2275,8 @@ pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
KASSERT((size & PAGE_MASK) == 0,
("pmap_kenter: Mapping is not page-sized"));
- attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
- ATTR_KERN_GP | ATTR_S1_IDX(mode);
+ attr = ATTR_AF | pmap_sh_attr | ATTR_S1_AP(ATTR_S1_AP_RW) |
+ ATTR_S1_XN | ATTR_KERN_GP | ATTR_S1_IDX(mode);
old_l3e = 0;
va = sva;
while (size != 0) {
@@ -2492,7 +2495,8 @@ pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
("pmap_qenter: Invalid level %d", lvl));
m = ma[i];
- attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
+ attr = ATTR_AF | pmap_sh_attr |
+ ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
#if __has_feature(capabilities)
ATTR_CAP_RW |
#endif
@@ -5320,7 +5324,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if ((m->oflags & VPO_UNMANAGED) == 0)
VM_PAGE_OBJECT_BUSY_ASSERT(m);
pa = VM_PAGE_TO_PHYS(m);
- new_l3 = (pt_entry_t)(PHYS_TO_PTE(pa) | ATTR_DEFAULT | L3_PAGE);
+ new_l3 = (pt_entry_t)(PHYS_TO_PTE(pa) | ATTR_AF | pmap_sh_attr |
+ L3_PAGE);
new_l3 |= pmap_pte_memattr(pmap, m->md.pv_memattr);
new_l3 |= pmap_pte_prot(pmap, prot, flags, m, va);
if ((flags & PMAP_ENTER_WIRED) != 0)
@@ -5661,13 +5666,13 @@ pmap_enter_l2_rx(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
KASSERT(ADDR_IS_CANONICAL(va),
("%s: Address not in canonical form: %lx", __func__, va));
- new_l2 = (pd_entry_t)(VM_PAGE_TO_PTE(m) | ATTR_DEFAULT |
+ new_l2 = (pd_entry_t)(VM_PAGE_TO_PTE(m) | pmap_sh_attr |
ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
L2_BLOCK);
- if ((m->oflags & VPO_UNMANAGED) == 0) {
+ if ((m->oflags & VPO_UNMANAGED) == 0)
new_l2 |= ATTR_SW_MANAGED;
- new_l2 &= ~ATTR_AF;
- }
+ else
+ new_l2 |= ATTR_AF;
if ((prot & VM_PROT_EXECUTE) == 0 ||
m->md.pv_memattr == VM_MEMATTR_DEVICE)
new_l2 |= ATTR_S1_XN;
@@ -5893,13 +5898,13 @@ pmap_enter_l3c_rx(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *ml3p,
KASSERT(ADDR_IS_CANONICAL(va),
("%s: Address not in canonical form: %lx", __func__, va));
- l3e = VM_PAGE_TO_PTE(m) | ATTR_DEFAULT |
+ l3e = VM_PAGE_TO_PTE(m) | pmap_sh_attr |
ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
ATTR_CONTIGUOUS | L3_PAGE;
- if ((m->oflags & VPO_UNMANAGED) == 0) {
+ if ((m->oflags & VPO_UNMANAGED) == 0)
l3e |= ATTR_SW_MANAGED;
- l3e &= ~ATTR_AF;
- }
+ else
+ l3e |= ATTR_AF;
if ((prot & VM_PROT_EXECUTE) == 0 ||
m->md.pv_memattr == VM_MEMATTR_DEVICE)
l3e |= ATTR_S1_XN;
@@ -6293,8 +6298,8 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
pmap_resident_count_inc(pmap, 1);
pa = VM_PAGE_TO_PHYS(m);
- l3_val = PHYS_TO_PTE(pa) | ATTR_DEFAULT | ATTR_S1_IDX(m->md.pv_memattr) |
- ATTR_S1_AP(ATTR_S1_AP_RO) | L3_PAGE;
+ l3_val = PHYS_TO_PTE(pa) | pmap_sh_attr |
+ ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) | L3_PAGE;
l3_val |= pmap_pte_bti(pmap, va);
if ((prot & VM_PROT_EXECUTE) == 0 ||
m->md.pv_memattr == VM_MEMATTR_DEVICE)
@@ -6312,10 +6317,10 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
/*
* Now validate mapping with RO protection
*/
- if ((m->oflags & VPO_UNMANAGED) == 0) {
+ if ((m->oflags & VPO_UNMANAGED) == 0)
l3_val |= ATTR_SW_MANAGED;
- l3_val &= ~ATTR_AF;
- }
+ else
+ l3_val |= ATTR_AF;
/* Sync icache before the mapping is stored to PTE */
if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
@@ -8439,9 +8444,9 @@ pmap_mapbios(vm_paddr_t pa, vm_size_t size)
/* Insert L2_BLOCK */
l2 = pmap_l1_to_l2(pde, va);
old_l2e |= pmap_load_store(l2,
- PHYS_TO_PTE(pa) | ATTR_DEFAULT | ATTR_S1_XN |
- ATTR_KERN_GP | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
- L2_BLOCK);
+ PHYS_TO_PTE(pa) | ATTR_AF | pmap_sh_attr |
+ ATTR_S1_XN | ATTR_KERN_GP |
+ ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
va += L2_SIZE;
pa += L2_SIZE;
diff --git a/sys/arm64/include/armreg.h b/sys/arm64/include/armreg.h
index 1398d07a8e34..b6c8c5c98c23 100644
--- a/sys/arm64/include/armreg.h
+++ b/sys/arm64/include/armreg.h
@@ -2521,6 +2521,8 @@
#define TCR_EL1_CRm 0
#define TCR_EL1_op2 2
/* Bits 63:59 are reserved */
+#define TCR_DS_SHIFT 59
+#define TCR_DS (UL(1) << TCR_DS_SHIFT)
#define TCR_TCMA1_SHIFT 58
#define TCR_TCMA1 (UL(1) << TCR_TCMA1_SHIFT)
#define TCR_TCMA0_SHIFT 57
@@ -2618,14 +2620,6 @@
#define TCR_T0SZ(x) ((x) << TCR_T0SZ_SHIFT)
#define TCR_TxSZ(x) (TCR_T1SZ(x) | TCR_T0SZ(x))
-#define TCR_CACHE_ATTRS ((TCR_IRGN0_WBWA | TCR_IRGN1_WBWA) |\
- (TCR_ORGN0_WBWA | TCR_ORGN1_WBWA))
-#ifdef SMP
-#define TCR_SMP_ATTRS (TCR_SH0_IS | TCR_SH1_IS)
-#else
-#define TCR_SMP_ATTRS 0
-#endif
-
/* TCR_EL12 */
#define TCR_EL12_REG MRS_REG_ALT_NAME(TCR_EL12)
#define TCR_EL12_op0 3
diff --git a/sys/arm64/include/hypervisor.h b/sys/arm64/include/hypervisor.h
index cd00f1198708..037db25a79f4 100644
--- a/sys/arm64/include/hypervisor.h
+++ b/sys/arm64/include/hypervisor.h
@@ -261,6 +261,8 @@
#define VTCR_EL2_HWU61 (1UL << VTCR_EL2_HWU61_SHIFT)
#define VTCR_EL2_HWU62_SHIFT 28
#define VTCR_EL2_HWU62 (1UL << VTCR_EL2_HWU62_SHIFT)
+#define VTCR_EL2_DS_SHIFT 32
+#define VTCR_EL2_DS (0x1UL << VTCR_EL2_DS_SHIFT)
/* VTTBR_EL2 - Virtualization Translation Table Base Register */
#define VTTBR_VMID_MASK 0xffff000000000000
diff --git a/sys/arm64/include/pmap.h b/sys/arm64/include/pmap.h
index dd0861b37623..41e4f1bfe615 100644
--- a/sys/arm64/include/pmap.h
+++ b/sys/arm64/include/pmap.h
@@ -139,6 +139,8 @@ extern struct pmap kernel_pmap_store;
extern vm_pointer_t virtual_avail;
extern vm_pointer_t virtual_end;
+extern pt_entry_t pmap_sh_attr;
+
/*
* Macros to test if a mapping is mappable with an L1 Section mapping
* or an L2 Large Page mapping.
diff --git a/sys/arm64/include/pte.h b/sys/arm64/include/pte.h
index fd6e7355d965..dec43238d0dd 100644
--- a/sys/arm64/include/pte.h
+++ b/sys/arm64/include/pte.h
@@ -122,14 +122,8 @@ typedef uint64_t pt_entry_t; /* page table entry */
#define ATTR_S2_MEMATTR_WB 0xf
#if __has_feature(capabilities)
-/*
- * The aarch64 orr instruction cannot handle ATTR_DEFAULT |
- * ATTR_CAP_RW as a single operand, so separate orr instructions are
- * required for ATTR_CAP_RW.
- */
#define ATTR_CAP_RW (ATTR_LC_ENABLED | ATTR_SC)
#endif
-#define ATTR_DEFAULT (ATTR_AF | ATTR_SH(ATTR_SH_IS))
#define ATTR_DESCR_MASK 3
#define ATTR_DESCR_VALID 1
diff --git a/sys/arm64/include/vmm.h b/sys/arm64/include/vmm.h
index eca282b04abe..2882ad082178 100644
--- a/sys/arm64/include/vmm.h
+++ b/sys/arm64/include/vmm.h
@@ -146,14 +146,30 @@ enum vm_reg_name {
#define VM_INTINFO_HWEXCEPTION (3 << 8)
#define VM_INTINFO_SWINTR (4 << 8)
-#define VM_MAX_SUFFIXLEN 15
-
#define VM_GUEST_BASE_IPA 0x80000000UL /* Guest kernel start ipa */
-#ifdef _KERNEL
-
-#define VM_MAX_NAMELEN 32
+/*
+ * The VM name has to fit into the pathname length constraints of devfs,
+ * governed primarily by SPECNAMELEN. The length is the total number of
+ * characters in the full path, relative to the mount point and not
+ * including any leading '/' characters.
+ * A prefix and a suffix are added to the name specified by the user.
+ * The prefix is usually "vmm/" or "vmm.io/", but can be a few characters
+ * longer for future use.
+ * The suffix is a string that identifies a bootrom image or some similar
+ * image that is attached to the VM. A separator character gets added to
+ * the suffix automatically when generating the full path, so it must be
+ * accounted for, reducing the effective length by 1.
+ * The effective length of a VM name is 229 bytes for FreeBSD 13 and 37
+ * bytes for FreeBSD 12. A minimum length is set for safety and supports
+ * a SPECNAMELEN as small as 32 on old systems.
+ */
+#define VM_MAX_PREFIXLEN 10
+#define VM_MAX_SUFFIXLEN 15
+#define VM_MAX_NAMELEN \
+ (SPECNAMELEN - VM_MAX_PREFIXLEN - VM_MAX_SUFFIXLEN - 1)
+#ifdef _KERNEL
struct vm;
struct vm_exception;
struct vm_exit;
diff --git a/sys/arm64/include/vmm_dev.h b/sys/arm64/include/vmm_dev.h
index 23917bb8b057..17f9cbfcccf4 100644
--- a/sys/arm64/include/vmm_dev.h
+++ b/sys/arm64/include/vmm_dev.h
@@ -27,6 +27,8 @@
#ifndef _VMM_DEV_H_
#define _VMM_DEV_H_
+#include <machine/vmm.h>
+
struct vm_memmap {
vm_paddr_t gpa;
int segid; /* memory segment */
diff --git a/sys/arm64/iommu/iommu.c b/sys/arm64/iommu/iommu.c
index b765763e3a60..af0edfee70d8 100644
--- a/sys/arm64/iommu/iommu.c
+++ b/sys/arm64/iommu/iommu.c
@@ -501,6 +501,11 @@ iommu_find(device_t dev, bool verbose)
return (NULL);
}
+void
+iommu_unit_pre_instantiate_ctx(struct iommu_unit *unit)
+{
+}
+
void
iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
bool cansleep __unused)
diff --git a/sys/arm64/iommu/iommu_pmap.c b/sys/arm64/iommu/iommu_pmap.c
index 82b6e1f8f4c7..fb4a35c89bd5 100644
--- a/sys/arm64/iommu/iommu_pmap.c
+++ b/sys/arm64/iommu/iommu_pmap.c
@@ -713,7 +713,7 @@ smmu_pmap_enter(struct smmu_pmap *pmap, vm_offset_t va, vm_paddr_t pa,
KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space"));
va = trunc_page(va);
- new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT |
+ new_l3 = (pt_entry_t)(pa | ATTR_AF | ATTR_SH(ATTR_SH_IS) |
ATTR_S1_IDX(VM_MEMATTR_DEVICE) | IOMMU_L3_PAGE);
if ((prot & VM_PROT_WRITE) == 0)
new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);
diff --git a/sys/arm64/iommu/smmu.c b/sys/arm64/iommu/smmu.c
index 93b0cbb7c8e4..a832f6a6ec70 100644
--- a/sys/arm64/iommu/smmu.c
+++ b/sys/arm64/iommu/smmu.c
@@ -960,10 +960,6 @@ smmu_init_strtab_2lvl(struct smmu_softc *sc)
sz = strtab->num_l1_entries * sizeof(struct l1_desc);
strtab->l1 = malloc(sz, M_SMMU, M_WAITOK | M_ZERO);
- if (strtab->l1 == NULL) {
- free(strtab->vaddr, M_SMMU);
- return (ENOMEM);
- }
reg = STRTAB_BASE_CFG_FMT_2LVL;
reg |= size << STRTAB_BASE_CFG_LOG2SIZE_S;
diff --git a/sys/arm64/vmm/vmm_arm64.c b/sys/arm64/vmm/vmm_arm64.c
index 60fdeb5c6d6d..eeea35be3382 100644
--- a/sys/arm64/vmm/vmm_arm64.c
+++ b/sys/arm64/vmm/vmm_arm64.c
@@ -475,6 +475,14 @@ vmmops_modinit(int ipinum)
*/
el2_regs.vtcr_el2 |= VTCR_EL2_HWU59 | VTCR_EL2_HWU60 | VTCR_EL2_HWU61;
#endif
+ /*
+ * If FEAT_LPA2 is enabled in the host then we need to enable it here
+ * so the page tables created by pmap.c are correct. The meaning of
+ * the shareability field changes to become address bits when this
+ * is set.
+ */
+ if ((READ_SPECIALREG(tcr_el1) & TCR_DS) != 0)
+ el2_regs.vtcr_el2 |= VTCR_EL2_DS;
smp_rendezvous(NULL, arm_setup_vectors, NULL, &el2_regs);
diff --git a/sys/arm64/vmm/vmm_mmu.c b/sys/arm64/vmm/vmm_mmu.c
index 3669b796a1c4..c0b9d848af81 100644
--- a/sys/arm64/vmm/vmm_mmu.c
+++ b/sys/arm64/vmm/vmm_mmu.c
@@ -294,7 +294,7 @@ vmmpmap_enter(vm_offset_t va, vm_size_t size, vm_paddr_t pa, vm_prot_t prot)
KASSERT((size & PAGE_MASK) == 0,
("%s: Mapping is not page-sized", __func__));
- l3e = ATTR_DEFAULT | L3_PAGE;
+ l3e = ATTR_AF | ATTR_SH(ATTR_SH_IS) | L3_PAGE;
/* This bit is res1 at EL2 */
l3e |= ATTR_S1_AP(ATTR_S1_AP_USER);
/* Only normal memory is used at EL2 */
diff --git a/sys/cam/ctl/ctl.c b/sys/cam/ctl/ctl.c
index e23df67b78ea..6fedf790cc02 100644
--- a/sys/cam/ctl/ctl.c
+++ b/sys/cam/ctl/ctl.c
@@ -2687,12 +2687,6 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
}
entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO);
- if (entries == NULL) {
- printf("%s: could not allocate %d bytes for OOA "
- "dump\n", __func__, ooa_hdr->alloc_len);
- retval = ENOMEM;
- break;
- }
mtx_lock(&softc->ctl_lock);
if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 &&
@@ -4636,7 +4630,7 @@ ctl_add_lun(struct ctl_be_lun *be_lun)
ctl_tpc_lun_init(lun);
if (lun->flags & CTL_LUN_REMOVABLE) {
lun->prevent = malloc((CTL_MAX_INITIATORS + 31) / 32 * 4,
- M_CTL, M_WAITOK);
+ M_CTL, M_WAITOK | M_ZERO);
}
/*
diff --git a/sys/cam/mmc/mmc_da.c b/sys/cam/mmc/mmc_da.c
index 597ba0efb47e..fc29a1925c66 100644
--- a/sys/cam/mmc/mmc_da.c
+++ b/sys/cam/mmc/mmc_da.c
@@ -88,9 +88,11 @@ typedef enum {
SDDA_STATE_PART_SWITCH,
} sdda_state;
-#define SDDA_FMT_BOOT "sdda%dboot"
-#define SDDA_FMT_GP "sdda%dgp"
-#define SDDA_FMT_RPMB "sdda%drpmb"
+/* Purposefully ignore a '%d' argument to snprintf in SDDA_FMT! */
+#define SDDA_FMT "%s"
+#define SDDA_FMT_BOOT "%s%dboot"
+#define SDDA_FMT_GP "%s%dgp"
+#define SDDA_FMT_RPMB "%s%drpmb"
#define SDDA_LABEL_ENH "enh"
#define SDDA_PART_NAMELEN (16 + 1)
@@ -1480,7 +1482,7 @@ sdda_start_init(void *context, union ccb *start_ccb)
sdda_process_mmc_partitions(periph, start_ccb);
} else if (mmcp->card_features & CARD_FEATURE_MEMORY) {
/* For SD[HC] cards, just add one partition that is the whole card */
- if (sdda_add_part(periph, 0, "sdda",
+ if (sdda_add_part(periph, 0, SDDA_FMT,
periph->unit_number,
mmc_get_media_size(periph),
sdda_get_read_only(periph, start_ccb)) == false)
@@ -1525,7 +1527,7 @@ sdda_add_part(struct cam_periph *periph, u_int type, const char *name,
part->type = type;
part->ro = ro;
part->sc = sc;
- snprintf(part->name, sizeof(part->name), name, periph->unit_number);
+ snprintf(part->name, sizeof(part->name), name, "sdda", periph->unit_number);
/*
* Due to the nature of RPMB partition it doesn't make much sense
@@ -1592,8 +1594,11 @@ sdda_add_part(struct cam_periph *periph, u_int type, const char *name,
part->disk->d_fwsectors = 0;
part->disk->d_fwheads = 0;
- if (sdda_mmcsd_compat)
- disk_add_alias(part->disk, "mmcsd");
+ if (sdda_mmcsd_compat) {
+ char cname[SDDA_PART_NAMELEN]; /* This equals the mmcsd namelen. */
+ snprintf(cname, sizeof(cname), name, "mmcsd", periph->unit_number);
+ disk_add_alias(part->disk, cname);
+ }
/*
* Acquire a reference to the periph before we register with GEOM.
@@ -1682,7 +1687,7 @@ sdda_process_mmc_partitions(struct cam_periph *periph, union ccb *ccb)
* data area in case partitions are supported.
*/
ro = sdda_get_read_only(periph, ccb);
- sdda_add_part(periph, EXT_CSD_PART_CONFIG_ACC_DEFAULT, "sdda",
+ sdda_add_part(periph, EXT_CSD_PART_CONFIG_ACC_DEFAULT, SDDA_FMT,
periph->unit_number, mmc_get_media_size(periph), ro);
sc->part_curr = EXT_CSD_PART_CONFIG_ACC_DEFAULT;
diff --git a/sys/compat/linuxkpi/common/include/linux/device/driver.h b/sys/compat/linuxkpi/common/include/linux/device/driver.h
new file mode 100644
index 000000000000..03b510c9c8b7
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/device/driver.h
@@ -0,0 +1,33 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2021 Bjoern A. Zeeb
+ * Copyright (c) 2024 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Björn Zeeb
+ * under sponsorship from the FreeBSD Foundation.
+ */
+
+#ifndef LINUXKPI_LINUX_DEVICE_DRIVER_H
+#define LINUXKPI_LINUX_DEVICE_DRIVER_H
+
+#include <linux/device.h>
+#include <linux/module.h>
+
+#define module_driver(_drv, _regf, _unregf) \
+static inline int \
+__CONCAT(__CONCAT(_, _drv), _init)(void) \
+{ \
+ return (_regf(&(_drv))); \
+} \
+ \
+static inline void \
+__CONCAT(__CONCAT(_, _drv), _exit)(void) \
+{ \
+ _unregf(&(_drv)); \
+} \
+ \
+module_init(__CONCAT(__CONCAT(_, _drv), _init)); \
+module_exit(__CONCAT(__CONCAT(_, _drv), _exit))
+
+#endif /* LINUXKPI_LINUX_DEVICE_DRIVER_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/errno.h b/sys/compat/linuxkpi/common/include/linux/errno.h
index ea258587c6f7..d634675d43d0 100644
--- a/sys/compat/linuxkpi/common/include/linux/errno.h
+++ b/sys/compat/linuxkpi/common/include/linux/errno.h
@@ -68,5 +68,6 @@
#define ENOMEDIUM 532
#define ENOSR 533
#define ELNRNG 534
+#define ENAVAIL 535
#endif /* _LINUXKPI_LINUX_ERRNO_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/if_ether.h b/sys/compat/linuxkpi/common/include/linux/if_ether.h
index 3735ad2f5527..6676e8fc142f 100644
--- a/sys/compat/linuxkpi/common/include/linux/if_ether.h
+++ b/sys/compat/linuxkpi/common/include/linux/if_ether.h
@@ -34,6 +34,7 @@
#define _LINUXKPI_LINUX_IF_ETHER_H_
#include
+#include <linux/skbuff.h>
#include
@@ -69,4 +70,13 @@ struct ethhdr {
uint16_t h_proto;
} __packed;
+static inline struct ethhdr *
+eth_hdr(const struct sk_buff *skb)
+{
+ struct ethhdr *hdr;
+
+ hdr = (struct ethhdr *)skb_mac_header(skb);
+ return (hdr);
+}
+
#endif /* _LINUXKPI_LINUX_IF_ETHER_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/pci.h b/sys/compat/linuxkpi/common/include/linux/pci.h
index e3a2970399e5..8977136d6c08 100644
--- a/sys/compat/linuxkpi/common/include/linux/pci.h
+++ b/sys/compat/linuxkpi/common/include/linux/pci.h
@@ -36,6 +36,7 @@
#define CONFIG_PCI_MSI
#include
+#include <linux/device/driver.h>
#include
#include
@@ -274,24 +275,8 @@ extern spinlock_t pci_lock;
#define __devexit_p(x) x
-#define module_pci_driver(_driver) \
- \
-static inline int \
-_pci_init(void) \
-{ \
- \
- return (linux_pci_register_driver(&_driver)); \
-} \
- \
-static inline void \
-_pci_exit(void) \
-{ \
- \
- linux_pci_unregister_driver(&_driver); \
-} \
- \
-module_init(_pci_init); \
-module_exit(_pci_exit)
+#define module_pci_driver(_drv) \
+ module_driver(_drv, linux_pci_register_driver, linux_pci_unregister_driver)
struct msi_msg {
uint32_t data;
diff --git a/sys/compat/linuxkpi/common/include/linux/printk.h b/sys/compat/linuxkpi/common/include/linux/printk.h
index 933d5aa6f94a..3840a6e5fb8a 100644
--- a/sys/compat/linuxkpi/common/include/linux/printk.h
+++ b/sys/compat/linuxkpi/common/include/linux/printk.h
@@ -125,4 +125,11 @@ print_hex_dump_bytes(const char *prefix_str, const int prefix_type,
#define pr_info_ratelimited(fmt, ...) \
printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+#define no_printk(fmt, ...) \
+({ \
+ if (0) \
+ printk(pr_fmt(fmt), ##__VA_ARGS__); \
+ 0; \
+})
+
#endif /* _LINUXKPI_LINUX_PRINTK_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/random.h b/sys/compat/linuxkpi/common/include/linux/random.h
index 808c5bc55974..893ee2b7b728 100644
--- a/sys/compat/linuxkpi/common/include/linux/random.h
+++ b/sys/compat/linuxkpi/common/include/linux/random.h
@@ -54,6 +54,15 @@ get_random_int(void)
return (val);
}
+static inline uint8_t
+get_random_u8(void)
+{
+ uint8_t val;
+
+ get_random_bytes(&val, sizeof(val));
+ return (val);
+}
+
#define get_random_u32() get_random_int()
/*
diff --git a/sys/compat/linuxkpi/common/src/linux_80211.c b/sys/compat/linuxkpi/common/src/linux_80211.c
index b4844a48483f..84da4cc32f3a 100644
--- a/sys/compat/linuxkpi/common/src/linux_80211.c
+++ b/sys/compat/linuxkpi/common/src/linux_80211.c
@@ -347,7 +347,7 @@ lkpi_lsta_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN],
sta->deflink.smps_mode = IEEE80211_SMPS_OFF;
sta->deflink.bandwidth = IEEE80211_STA_RX_BW_20;
- sta->deflink.rx_nss = 0;
+ sta->deflink.rx_nss = 1;
ht_rx_nss = 0;
#if defined(LKPI_80211_HT)
@@ -4401,8 +4401,6 @@ lkpi_ieee80211_ifalloc(void)
struct ieee80211com *ic;
ic = malloc(sizeof(*ic), M_LKPI80211, M_WAITOK | M_ZERO);
- if (ic == NULL)
- return (NULL);
/* Setting these happens later when we have device information. */
ic->ic_softc = NULL;
@@ -4454,10 +4452,6 @@ linuxkpi_ieee80211_alloc_hw(size_t priv_len, const struct ieee80211_ops *ops)
/* BSD Specific. */
lhw->ic = lkpi_ieee80211_ifalloc();
- if (lhw->ic == NULL) {
- ieee80211_free_hw(hw);
- return (NULL);
- }
IMPROVE();
diff --git a/sys/conf/ldscript.amd64 b/sys/conf/ldscript.amd64
index b3286612b41a..1c7fde2eada0 100644
--- a/sys/conf/ldscript.amd64
+++ b/sys/conf/ldscript.amd64
@@ -93,15 +93,15 @@ SECTIONS
.init_array :
{
PROVIDE_HIDDEN (__init_array_start = .);
- KEEP (*(SORT(.init_array.*)))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*)))
KEEP (*(.init_array))
PROVIDE_HIDDEN (__init_array_end = .);
}
.fini_array :
{
PROVIDE_HIDDEN (__fini_array_start = .);
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*)))
KEEP (*(.fini_array))
- KEEP (*(SORT(.fini_array.*)))
PROVIDE_HIDDEN (__fini_array_end = .);
}
_start_ctors = .;
diff --git a/sys/conf/ldscript.arm b/sys/conf/ldscript.arm
index 0764c99f9042..e8d2db3f854b 100644
--- a/sys/conf/ldscript.arm
+++ b/sys/conf/ldscript.arm
@@ -83,6 +83,20 @@ SECTIONS
}
.data1 : { *(.data1) }
. = ALIGN(32 / 8);
+ .init_array :
+ {
+ PROVIDE_HIDDEN (__init_array_start = .);
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*)))
+ KEEP (*(.init_array))
+ PROVIDE_HIDDEN (__init_array_end = .);
+ }
+ .fini_array :
+ {
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*)))
+ KEEP (*(.fini_array))
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ }
_start_ctors = .;
PROVIDE (start_ctors = .);
.ctors :
diff --git a/sys/conf/ldscript.arm64 b/sys/conf/ldscript.arm64
index e4a375653627..a0ff1539d0ab 100644
--- a/sys/conf/ldscript.arm64
+++ b/sys/conf/ldscript.arm64
@@ -104,6 +104,20 @@ SECTIONS
. = ALIGN(128);
.data1 : { *(.data1) }
. = ALIGN(32 / 8);
+ .init_array :
+ {
+ PROVIDE_HIDDEN (__init_array_start = .);
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*)))
+ KEEP (*(.init_array))
+ PROVIDE_HIDDEN (__init_array_end = .);
+ }
+ .fini_array :
+ {
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*)))
+ KEEP (*(.fini_array))
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ }
_start_ctors = .;
PROVIDE (start_ctors = .);
.ctors :
diff --git a/sys/conf/ldscript.i386 b/sys/conf/ldscript.i386
index 66bdbc4a80cb..467cba24d43f 100644
--- a/sys/conf/ldscript.i386
+++ b/sys/conf/ldscript.i386
@@ -87,15 +87,15 @@ SECTIONS
.init_array :
{
PROVIDE_HIDDEN (__init_array_start = .);
- KEEP (*(SORT(.init_array.*)))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*)))
KEEP (*(.init_array))
PROVIDE_HIDDEN (__init_array_end = .);
}
.fini_array :
{
PROVIDE_HIDDEN (__fini_array_start = .);
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*)))
KEEP (*(.fini_array))
- KEEP (*(SORT(.fini_array.*)))
PROVIDE_HIDDEN (__fini_array_end = .);
}
_start_ctors = .;
diff --git a/sys/conf/ldscript.powerpc b/sys/conf/ldscript.powerpc
index 0e11dd4459db..3a407a4dbf88 100644
--- a/sys/conf/ldscript.powerpc
+++ b/sys/conf/ldscript.powerpc
@@ -78,8 +78,20 @@ SECTIONS
. = ALIGN(4096);
.got : { *(.got) }
.got.plt : { *(.got.plt) }
-
-
+ .init_array :
+ {
+ PROVIDE_HIDDEN (__init_array_start = .);
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*)))
+ KEEP (*(.init_array))
+ PROVIDE_HIDDEN (__init_array_end = .);
+ }
+ .fini_array :
+ {
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*)))
+ KEEP (*(.fini_array))
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ }
.dynamic : { *(.dynamic) } :kernel :dynamic
/* Put .ctors and .dtors next to the .got2 section, so that the pointers
get relocated with -mrelocatable. Also put in the .fixup pointers.
diff --git a/sys/conf/ldscript.powerpc64 b/sys/conf/ldscript.powerpc64
index 58a3dc69931b..a342a48b9daf 100644
--- a/sys/conf/ldscript.powerpc64
+++ b/sys/conf/ldscript.powerpc64
@@ -104,7 +104,20 @@ SECTIONS
. = ALIGN(4096);
.got : ALIGN(8) { __tocbase = .; *(.got) }
.toc : ALIGN(8) { *(.toc) }
-
+ .init_array :
+ {
+ PROVIDE_HIDDEN (__init_array_start = .);
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*)))
+ KEEP (*(.init_array))
+ PROVIDE_HIDDEN (__init_array_end = .);
+ }
+ .fini_array :
+ {
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*)))
+ KEEP (*(.fini_array))
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ }
.dynamic : { *(.dynamic) } :kernel :dynamic
/* Put .ctors and .dtors next to the .got2 section, so that the pointers
get relocated with -mrelocatable. Also put in the .fixup pointers.
diff --git a/sys/conf/ldscript.powerpc64le b/sys/conf/ldscript.powerpc64le
index a65b39b3d9eb..1d5f3efe64fd 100644
--- a/sys/conf/ldscript.powerpc64le
+++ b/sys/conf/ldscript.powerpc64le
@@ -104,7 +104,20 @@ SECTIONS
. = ALIGN(4096);
.got : ALIGN(8) { __tocbase = .; *(.got) }
.toc : ALIGN(8) { *(.toc) }
-
+ .init_array :
+ {
+ PROVIDE_HIDDEN (__init_array_start = .);
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*)))
+ KEEP (*(.init_array))
+ PROVIDE_HIDDEN (__init_array_end = .);
+ }
+ .fini_array :
+ {
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*)))
+ KEEP (*(.fini_array))
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ }
.dynamic : { *(.dynamic) } :kernel :dynamic
/* Put .ctors and .dtors next to the .got2 section, so that the pointers
get relocated with -mrelocatable. Also put in the .fixup pointers.
diff --git a/sys/conf/ldscript.powerpcspe b/sys/conf/ldscript.powerpcspe
index 66630aaabaae..fa82cbe8330f 100644
--- a/sys/conf/ldscript.powerpcspe
+++ b/sys/conf/ldscript.powerpcspe
@@ -79,8 +79,20 @@ SECTIONS
. = ALIGN(4096);
.got : { *(.got) }
.got.plt : { *(.got.plt) }
-
-
+ .init_array :
+ {
+ PROVIDE_HIDDEN (__init_array_start = .);
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*)))
+ KEEP (*(.init_array))
+ PROVIDE_HIDDEN (__init_array_end = .);
+ }
+ .fini_array :
+ {
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*)))
+ KEEP (*(.fini_array))
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ }
.dynamic : { *(.dynamic) } :kernel :dynamic
/* Put .ctors and .dtors next to the .got2 section, so that the pointers
get relocated with -mrelocatable. Also put in the .fixup pointers.
diff --git a/sys/conf/ldscript.riscv b/sys/conf/ldscript.riscv
index b964a8739b7c..c730b423ccc2 100644
--- a/sys/conf/ldscript.riscv
+++ b/sys/conf/ldscript.riscv
@@ -71,6 +71,20 @@ SECTIONS
}
.data1 : { *(.data1) }
. = ALIGN(32 / 8);
+ .init_array :
+ {
+ PROVIDE_HIDDEN (__init_array_start = .);
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*)))
+ KEEP (*(.init_array))
+ PROVIDE_HIDDEN (__init_array_end = .);
+ }
+ .fini_array :
+ {
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*)))
+ KEEP (*(.fini_array))
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ }
_start_ctors = .;
PROVIDE (start_ctors = .);
.ctors :
diff --git a/sys/contrib/libnv/nv_impl.h b/sys/contrib/libnv/nv_impl.h
index e9cd3ffabc3f..4ac57fc7b497 100644
--- a/sys/contrib/libnv/nv_impl.h
+++ b/sys/contrib/libnv/nv_impl.h
@@ -42,6 +42,14 @@ struct nvpair;
typedef struct nvpair nvpair_t;
#endif
+struct nvlist_header {
+ uint8_t nvlh_magic;
+ uint8_t nvlh_version;
+ uint8_t nvlh_flags;
+ uint64_t nvlh_descriptors;
+ uint64_t nvlh_size;
+} __packed;
+
#define NV_TYPE_NVLIST_ARRAY_NEXT 254
#define NV_TYPE_NVLIST_UP 255
diff --git a/sys/contrib/libnv/nvlist.c b/sys/contrib/libnv/nvlist.c
index 92d6e655876a..00ba3b41f3a3 100644
--- a/sys/contrib/libnv/nvlist.c
+++ b/sys/contrib/libnv/nvlist.c
@@ -118,13 +118,6 @@ MALLOC_DEFINE(M_NVLIST, "nvlist", "kernel nvlist");
#define NVLIST_HEADER_MAGIC 0x6c
#define NVLIST_HEADER_VERSION 0x00
-struct nvlist_header {
- uint8_t nvlh_magic;
- uint8_t nvlh_version;
- uint8_t nvlh_flags;
- uint64_t nvlh_descriptors;
- uint64_t nvlh_size;
-} __packed;
nvlist_t *
nvlist_create(int flags)
diff --git a/sys/contrib/openzfs/cmd/ztest.c b/sys/contrib/openzfs/cmd/ztest.c
index 6a9264ddcc4c..eb68c27b1dc1 100644
--- a/sys/contrib/openzfs/cmd/ztest.c
+++ b/sys/contrib/openzfs/cmd/ztest.c
@@ -6211,13 +6211,14 @@ void
ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
- nvlist_t *props = NULL;
(void) pthread_rwlock_rdlock(&ztest_name_lock);
(void) ztest_spa_prop_set_uint64(ZPOOL_PROP_AUTOTRIM, ztest_random(2));
- VERIFY0(spa_prop_get(ztest_spa, &props));
+ nvlist_t *props = fnvlist_alloc();
+
+ VERIFY0(spa_prop_get(ztest_spa, props));
if (ztest_opts.zo_verbose >= 6)
dump_nvlist(props, 4);
diff --git a/sys/contrib/openzfs/include/sys/spa.h b/sys/contrib/openzfs/include/sys/spa.h
index 3998f5a6de73..0fa3149e6c6f 100644
--- a/sys/contrib/openzfs/include/sys/spa.h
+++ b/sys/contrib/openzfs/include/sys/spa.h
@@ -1196,9 +1196,9 @@ extern void spa_boot_init(void);
/* properties */
extern int spa_prop_set(spa_t *spa, nvlist_t *nvp);
-extern int spa_prop_get(spa_t *spa, nvlist_t **nvp);
+extern int spa_prop_get(spa_t *spa, nvlist_t *nvp);
extern int spa_prop_get_nvlist(spa_t *spa, char **props,
- unsigned int n_props, nvlist_t **outnvl);
+ unsigned int n_props, nvlist_t *outnvl);
extern void spa_prop_clear_bootfs(spa_t *spa, uint64_t obj, dmu_tx_t *tx);
extern void spa_configfile_set(spa_t *, nvlist_t *, boolean_t);
diff --git a/sys/contrib/openzfs/module/zfs/spa.c b/sys/contrib/openzfs/module/zfs/spa.c
index cafc7196c354..7a3dd29769ca 100644
--- a/sys/contrib/openzfs/module/zfs/spa.c
+++ b/sys/contrib/openzfs/module/zfs/spa.c
@@ -366,21 +366,15 @@ spa_prop_add(spa_t *spa, const char *propname, nvlist_t *outnvl)
int
spa_prop_get_nvlist(spa_t *spa, char **props, unsigned int n_props,
- nvlist_t **outnvl)
+ nvlist_t *outnvl)
{
int err = 0;
if (props == NULL)
return (0);
- if (*outnvl == NULL) {
- err = nvlist_alloc(outnvl, NV_UNIQUE_NAME, KM_SLEEP);
- if (err)
- return (err);
- }
-
for (unsigned int i = 0; i < n_props && err == 0; i++) {
- err = spa_prop_add(spa, props[i], *outnvl);
+ err = spa_prop_add(spa, props[i], outnvl);
}
return (err);
@@ -406,7 +400,7 @@ spa_prop_add_user(nvlist_t *nvl, const char *propname, char *strval,
* Get property values from the spa configuration.
*/
static void
-spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
+spa_prop_get_config(spa_t *spa, nvlist_t *nv)
{
vdev_t *rvd = spa->spa_root_vdev;
dsl_pool_t *pool = spa->spa_dsl_pool;
@@ -428,48 +422,48 @@ spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
size += metaslab_class_get_space(spa_dedup_class(spa));
size += metaslab_class_get_space(spa_embedded_log_class(spa));
- spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
+ spa_prop_add_list(nv, ZPOOL_PROP_SIZE, NULL, size, src);
+ spa_prop_add_list(nv, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
+ spa_prop_add_list(nv, ZPOOL_PROP_FREE, NULL,
size - alloc, src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_CHECKPOINT, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_CHECKPOINT, NULL,
spa->spa_checkpoint_info.sci_dspace, src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_FRAGMENTATION, NULL,
metaslab_class_fragmentation(mc), src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_EXPANDSZ, NULL,
metaslab_class_expandable_space(mc), src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_READONLY, NULL,
(spa_mode(spa) == SPA_MODE_READ), src);
cap = (size == 0) ? 0 : (alloc * 100 / size);
- spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
+ spa_prop_add_list(nv, ZPOOL_PROP_CAPACITY, NULL, cap, src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_DEDUPRATIO, NULL,
ddt_get_pool_dedup_ratio(spa), src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_BCLONEUSED, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_BCLONEUSED, NULL,
brt_get_used(spa), src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_BCLONESAVED, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_BCLONESAVED, NULL,
brt_get_saved(spa), src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_BCLONERATIO, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_BCLONERATIO, NULL,
brt_get_ratio(spa), src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUP_TABLE_SIZE, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_DEDUP_TABLE_SIZE, NULL,
ddt_get_ddt_dsize(spa), src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_HEALTH, NULL,
rvd->vdev_state, src);
version = spa_version(spa);
if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) {
- spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_VERSION, NULL,
version, ZPROP_SRC_DEFAULT);
} else {
- spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_VERSION, NULL,
version, ZPROP_SRC_LOCAL);
}
- spa_prop_add_list(*nvp, ZPOOL_PROP_LOAD_GUID,
+ spa_prop_add_list(nv, ZPOOL_PROP_LOAD_GUID,
NULL, spa_load_guid(spa), src);
}
@@ -479,62 +473,62 @@ spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
* when opening pools before this version freedir will be NULL.
*/
if (pool->dp_free_dir != NULL) {
- spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_FREEING, NULL,
dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
src);
} else {
- spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
+ spa_prop_add_list(nv, ZPOOL_PROP_FREEING,
NULL, 0, src);
}
if (pool->dp_leak_dir != NULL) {
- spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_LEAKED, NULL,
dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
src);
} else {
- spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
+ spa_prop_add_list(nv, ZPOOL_PROP_LEAKED,
NULL, 0, src);
}
}
- spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
+ spa_prop_add_list(nv, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
if (spa->spa_comment != NULL) {
- spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
+ spa_prop_add_list(nv, ZPOOL_PROP_COMMENT, spa->spa_comment,
0, ZPROP_SRC_LOCAL);
}
if (spa->spa_compatibility != NULL) {
- spa_prop_add_list(*nvp, ZPOOL_PROP_COMPATIBILITY,
+ spa_prop_add_list(nv, ZPOOL_PROP_COMPATIBILITY,
spa->spa_compatibility, 0, ZPROP_SRC_LOCAL);
}
if (spa->spa_root != NULL)
- spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
+ spa_prop_add_list(nv, ZPOOL_PROP_ALTROOT, spa->spa_root,
0, ZPROP_SRC_LOCAL);
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
- spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
} else {
- spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
}
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) {
- spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_MAXDNODESIZE, NULL,
DNODE_MAX_SIZE, ZPROP_SRC_NONE);
} else {
- spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_MAXDNODESIZE, NULL,
DNODE_MIN_SIZE, ZPROP_SRC_NONE);
}
if ((dp = list_head(&spa->spa_config_list)) != NULL) {
if (dp->scd_path == NULL) {
- spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
+ spa_prop_add_list(nv, ZPOOL_PROP_CACHEFILE,
"none", 0, ZPROP_SRC_LOCAL);
} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
- spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
+ spa_prop_add_list(nv, ZPOOL_PROP_CACHEFILE,
dp->scd_path, 0, ZPROP_SRC_LOCAL);
}
}
@@ -544,19 +538,13 @@ spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
* Get zpool property values.
*/
int
-spa_prop_get(spa_t *spa, nvlist_t **nvp)
+spa_prop_get(spa_t *spa, nvlist_t *nv)
{
objset_t *mos = spa->spa_meta_objset;
zap_cursor_t zc;
zap_attribute_t za;
dsl_pool_t *dp;
- int err;
-
- if (*nvp == NULL) {
- err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP);
- if (err)
- return (err);
- }
+ int err = 0;
dp = spa_get_dsl(spa);
dsl_pool_config_enter(dp, FTAG);
@@ -565,7 +553,7 @@ spa_prop_get(spa_t *spa, nvlist_t **nvp)
/*
* Get properties from the spa config.
*/
- spa_prop_get_config(spa, nvp);
+ spa_prop_get_config(spa, nv);
/* If no pool property object, no more prop to get. */
if (mos == NULL || spa->spa_pool_props_object == 0)
@@ -610,7 +598,7 @@ spa_prop_get(spa_t *spa, nvlist_t **nvp)
intval = za.za_first_integer;
}
- spa_prop_add_list(*nvp, prop, strval, intval, src);
+ spa_prop_add_list(nv, prop, strval, intval, src);
if (strval != NULL)
kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);
@@ -627,10 +615,10 @@ spa_prop_get(spa_t *spa, nvlist_t **nvp)
break;
}
if (prop != ZPOOL_PROP_INVAL) {
- spa_prop_add_list(*nvp, prop, strval, 0, src);
+ spa_prop_add_list(nv, prop, strval, 0, src);
} else {
src = ZPROP_SRC_LOCAL;
- spa_prop_add_user(*nvp, za.za_name, strval,
+ spa_prop_add_user(nv, za.za_name, strval,
src);
}
kmem_free(strval, za.za_num_integers);
@@ -644,11 +632,9 @@ spa_prop_get(spa_t *spa, nvlist_t **nvp)
out:
mutex_exit(&spa->spa_props_lock);
dsl_pool_config_exit(dp, FTAG);
- if (err && err != ENOENT) {
- nvlist_free(*nvp);
- *nvp = NULL;
+
+ if (err && err != ENOENT)
return (err);
- }
return (0);
}
diff --git a/sys/contrib/openzfs/module/zfs/zfs_ioctl.c b/sys/contrib/openzfs/module/zfs/zfs_ioctl.c
index 897335dd4e4f..3e2fb73b11ed 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_ioctl.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_ioctl.c
@@ -3022,7 +3022,6 @@ static const zfs_ioc_key_t zfs_keys_get_props[] = {
static int
zfs_ioc_pool_get_props(const char *pool, nvlist_t *innvl, nvlist_t *outnvl)
{
- nvlist_t *nvp = outnvl;
spa_t *spa;
char **props = NULL;
unsigned int n_props = 0;
@@ -3041,16 +3040,17 @@ zfs_ioc_pool_get_props(const char *pool, nvlist_t *innvl, nvlist_t *outnvl)
*/
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(pool)) != NULL) {
- error = spa_prop_get(spa, &nvp);
+ error = spa_prop_get(spa, outnvl);
if (error == 0 && props != NULL)
error = spa_prop_get_nvlist(spa, props, n_props,
- &nvp);
+ outnvl);
}
mutex_exit(&spa_namespace_lock);
} else {
- error = spa_prop_get(spa, &nvp);
+ error = spa_prop_get(spa, outnvl);
if (error == 0 && props != NULL)
- error = spa_prop_get_nvlist(spa, props, n_props, &nvp);
+ error = spa_prop_get_nvlist(spa, props, n_props,
+ outnvl);
spa_close(spa, FTAG);
}
diff --git a/sys/contrib/rdma/krping/krping_dev.c b/sys/contrib/rdma/krping/krping_dev.c
index eea3c772ea4f..59aa19672443 100644
--- a/sys/contrib/rdma/krping/krping_dev.c
+++ b/sys/contrib/rdma/krping/krping_dev.c
@@ -174,12 +174,7 @@ krping_write(struct cdev *dev, struct uio *uio, int ioflag)
char *cp;
krping_t *krpingmsg;
- krpingmsg = malloc(sizeof *krpingmsg, M_DEVBUF, M_WAITOK|M_ZERO);
- if (!krpingmsg) {
- uprintf("Could not malloc mem!\n");
- return ENOMEM;
- }
-
+ krpingmsg = malloc(sizeof *krpingmsg, M_DEVBUF, M_WAITOK | M_ZERO);
cp = krpingmsg->msg;
while (uio->uio_resid) {
amt = MIN(uio->uio_resid, remain);
diff --git a/sys/contrib/subrepo-openzfs/cmd/ztest.c b/sys/contrib/subrepo-openzfs/cmd/ztest.c
index 6a9264ddcc4c..eb68c27b1dc1 100644
--- a/sys/contrib/subrepo-openzfs/cmd/ztest.c
+++ b/sys/contrib/subrepo-openzfs/cmd/ztest.c
@@ -6211,13 +6211,14 @@ void
ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
- nvlist_t *props = NULL;
(void) pthread_rwlock_rdlock(&ztest_name_lock);
(void) ztest_spa_prop_set_uint64(ZPOOL_PROP_AUTOTRIM, ztest_random(2));
- VERIFY0(spa_prop_get(ztest_spa, &props));
+ nvlist_t *props = fnvlist_alloc();
+
+ VERIFY0(spa_prop_get(ztest_spa, props));
if (ztest_opts.zo_verbose >= 6)
dump_nvlist(props, 4);
diff --git a/sys/contrib/subrepo-openzfs/include/sys/spa.h b/sys/contrib/subrepo-openzfs/include/sys/spa.h
index 9e667b38dbb1..9da8b5da42c5 100644
--- a/sys/contrib/subrepo-openzfs/include/sys/spa.h
+++ b/sys/contrib/subrepo-openzfs/include/sys/spa.h
@@ -1196,9 +1196,9 @@ extern void spa_boot_init(void);
/* properties */
extern int spa_prop_set(spa_t *spa, nvlist_t *nvp);
-extern int spa_prop_get(spa_t *spa, nvlist_t **nvp);
+extern int spa_prop_get(spa_t *spa, nvlist_t *nvp);
extern int spa_prop_get_nvlist(spa_t *spa, char **props,
- unsigned int n_props, nvlist_t **outnvl);
+ unsigned int n_props, nvlist_t *outnvl);
extern void spa_prop_clear_bootfs(spa_t *spa, uint64_t obj, dmu_tx_t *tx);
extern void spa_configfile_set(spa_t *, nvlist_t *, boolean_t);
diff --git a/sys/contrib/subrepo-openzfs/module/zfs/spa.c b/sys/contrib/subrepo-openzfs/module/zfs/spa.c
index cafc7196c354..7a3dd29769ca 100644
--- a/sys/contrib/subrepo-openzfs/module/zfs/spa.c
+++ b/sys/contrib/subrepo-openzfs/module/zfs/spa.c
@@ -366,21 +366,15 @@ spa_prop_add(spa_t *spa, const char *propname, nvlist_t *outnvl)
int
spa_prop_get_nvlist(spa_t *spa, char **props, unsigned int n_props,
- nvlist_t **outnvl)
+ nvlist_t *outnvl)
{
int err = 0;
if (props == NULL)
return (0);
- if (*outnvl == NULL) {
- err = nvlist_alloc(outnvl, NV_UNIQUE_NAME, KM_SLEEP);
- if (err)
- return (err);
- }
-
for (unsigned int i = 0; i < n_props && err == 0; i++) {
- err = spa_prop_add(spa, props[i], *outnvl);
+ err = spa_prop_add(spa, props[i], outnvl);
}
return (err);
@@ -406,7 +400,7 @@ spa_prop_add_user(nvlist_t *nvl, const char *propname, char *strval,
* Get property values from the spa configuration.
*/
static void
-spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
+spa_prop_get_config(spa_t *spa, nvlist_t *nv)
{
vdev_t *rvd = spa->spa_root_vdev;
dsl_pool_t *pool = spa->spa_dsl_pool;
@@ -428,48 +422,48 @@ spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
size += metaslab_class_get_space(spa_dedup_class(spa));
size += metaslab_class_get_space(spa_embedded_log_class(spa));
- spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
+ spa_prop_add_list(nv, ZPOOL_PROP_SIZE, NULL, size, src);
+ spa_prop_add_list(nv, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
+ spa_prop_add_list(nv, ZPOOL_PROP_FREE, NULL,
size - alloc, src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_CHECKPOINT, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_CHECKPOINT, NULL,
spa->spa_checkpoint_info.sci_dspace, src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_FRAGMENTATION, NULL,
metaslab_class_fragmentation(mc), src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_EXPANDSZ, NULL,
metaslab_class_expandable_space(mc), src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_READONLY, NULL,
(spa_mode(spa) == SPA_MODE_READ), src);
cap = (size == 0) ? 0 : (alloc * 100 / size);
- spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
+ spa_prop_add_list(nv, ZPOOL_PROP_CAPACITY, NULL, cap, src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_DEDUPRATIO, NULL,
ddt_get_pool_dedup_ratio(spa), src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_BCLONEUSED, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_BCLONEUSED, NULL,
brt_get_used(spa), src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_BCLONESAVED, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_BCLONESAVED, NULL,
brt_get_saved(spa), src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_BCLONERATIO, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_BCLONERATIO, NULL,
brt_get_ratio(spa), src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUP_TABLE_SIZE, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_DEDUP_TABLE_SIZE, NULL,
ddt_get_ddt_dsize(spa), src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_HEALTH, NULL,
rvd->vdev_state, src);
version = spa_version(spa);
if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) {
- spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_VERSION, NULL,
version, ZPROP_SRC_DEFAULT);
} else {
- spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_VERSION, NULL,
version, ZPROP_SRC_LOCAL);
}
- spa_prop_add_list(*nvp, ZPOOL_PROP_LOAD_GUID,
+ spa_prop_add_list(nv, ZPOOL_PROP_LOAD_GUID,
NULL, spa_load_guid(spa), src);
}
@@ -479,62 +473,62 @@ spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
* when opening pools before this version freedir will be NULL.
*/
if (pool->dp_free_dir != NULL) {
- spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_FREEING, NULL,
dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
src);
} else {
- spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
+ spa_prop_add_list(nv, ZPOOL_PROP_FREEING,
NULL, 0, src);
}
if (pool->dp_leak_dir != NULL) {
- spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_LEAKED, NULL,
dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
src);
} else {
- spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
+ spa_prop_add_list(nv, ZPOOL_PROP_LEAKED,
NULL, 0, src);
}
}
- spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
+ spa_prop_add_list(nv, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
if (spa->spa_comment != NULL) {
- spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
+ spa_prop_add_list(nv, ZPOOL_PROP_COMMENT, spa->spa_comment,
0, ZPROP_SRC_LOCAL);
}
if (spa->spa_compatibility != NULL) {
- spa_prop_add_list(*nvp, ZPOOL_PROP_COMPATIBILITY,
+ spa_prop_add_list(nv, ZPOOL_PROP_COMPATIBILITY,
spa->spa_compatibility, 0, ZPROP_SRC_LOCAL);
}
if (spa->spa_root != NULL)
- spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
+ spa_prop_add_list(nv, ZPOOL_PROP_ALTROOT, spa->spa_root,
0, ZPROP_SRC_LOCAL);
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
- spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
} else {
- spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
}
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) {
- spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_MAXDNODESIZE, NULL,
DNODE_MAX_SIZE, ZPROP_SRC_NONE);
} else {
- spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
+ spa_prop_add_list(nv, ZPOOL_PROP_MAXDNODESIZE, NULL,
DNODE_MIN_SIZE, ZPROP_SRC_NONE);
}
if ((dp = list_head(&spa->spa_config_list)) != NULL) {
if (dp->scd_path == NULL) {
- spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
+ spa_prop_add_list(nv, ZPOOL_PROP_CACHEFILE,
"none", 0, ZPROP_SRC_LOCAL);
} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
- spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
+ spa_prop_add_list(nv, ZPOOL_PROP_CACHEFILE,
dp->scd_path, 0, ZPROP_SRC_LOCAL);
}
}
@@ -544,19 +538,13 @@ spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
* Get zpool property values.
*/
int
-spa_prop_get(spa_t *spa, nvlist_t **nvp)
+spa_prop_get(spa_t *spa, nvlist_t *nv)
{
objset_t *mos = spa->spa_meta_objset;
zap_cursor_t zc;
zap_attribute_t za;
dsl_pool_t *dp;
- int err;
-
- if (*nvp == NULL) {
- err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP);
- if (err)
- return (err);
- }
+ int err = 0;
dp = spa_get_dsl(spa);
dsl_pool_config_enter(dp, FTAG);
@@ -565,7 +553,7 @@ spa_prop_get(spa_t *spa, nvlist_t **nvp)
/*
* Get properties from the spa config.
*/
- spa_prop_get_config(spa, nvp);
+ spa_prop_get_config(spa, nv);
/* If no pool property object, no more prop to get. */
if (mos == NULL || spa->spa_pool_props_object == 0)
@@ -610,7 +598,7 @@ spa_prop_get(spa_t *spa, nvlist_t **nvp)
intval = za.za_first_integer;
}
- spa_prop_add_list(*nvp, prop, strval, intval, src);
+ spa_prop_add_list(nv, prop, strval, intval, src);
if (strval != NULL)
kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);
@@ -627,10 +615,10 @@ spa_prop_get(spa_t *spa, nvlist_t **nvp)
break;
}
if (prop != ZPOOL_PROP_INVAL) {
- spa_prop_add_list(*nvp, prop, strval, 0, src);
+ spa_prop_add_list(nv, prop, strval, 0, src);
} else {
src = ZPROP_SRC_LOCAL;
- spa_prop_add_user(*nvp, za.za_name, strval,
+ spa_prop_add_user(nv, za.za_name, strval,
src);
}
kmem_free(strval, za.za_num_integers);
@@ -644,11 +632,9 @@ spa_prop_get(spa_t *spa, nvlist_t **nvp)
out:
mutex_exit(&spa->spa_props_lock);
dsl_pool_config_exit(dp, FTAG);
- if (err && err != ENOENT) {
- nvlist_free(*nvp);
- *nvp = NULL;
+
+ if (err && err != ENOENT)
return (err);
- }
return (0);
}
diff --git a/sys/contrib/subrepo-openzfs/module/zfs/zfs_ioctl.c b/sys/contrib/subrepo-openzfs/module/zfs/zfs_ioctl.c
index 8f9d1a360f98..88a188623f7a 100644
--- a/sys/contrib/subrepo-openzfs/module/zfs/zfs_ioctl.c
+++ b/sys/contrib/subrepo-openzfs/module/zfs/zfs_ioctl.c
@@ -3022,7 +3022,6 @@ static const zfs_ioc_key_t zfs_keys_get_props[] = {
static int
zfs_ioc_pool_get_props(const char *pool, nvlist_t *innvl, nvlist_t *outnvl)
{
- nvlist_t *nvp = outnvl;
spa_t *spa;
char **props = NULL;
unsigned int n_props = 0;
@@ -3041,16 +3040,17 @@ zfs_ioc_pool_get_props(const char *pool, nvlist_t *innvl, nvlist_t *outnvl)
*/
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(pool)) != NULL) {
- error = spa_prop_get(spa, &nvp);
+ error = spa_prop_get(spa, outnvl);
if (error == 0 && props != NULL)
error = spa_prop_get_nvlist(spa, props, n_props,
- &nvp);
+ outnvl);
}
mutex_exit(&spa_namespace_lock);
} else {
- error = spa_prop_get(spa, &nvp);
+ error = spa_prop_get(spa, outnvl);
if (error == 0 && props != NULL)
- error = spa_prop_get_nvlist(spa, props, n_props, &nvp);
+ error = spa_prop_get_nvlist(spa, props, n_props,
+ outnvl);
spa_close(spa, FTAG);
}
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c
index 80223a4ab900..bc1b0039af60 100644
--- a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c
@@ -278,8 +278,6 @@ vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
bi = malloc(sizeof(*bi), M_VCPAGELIST, M_WAITOK | M_ZERO);
- if (bi == NULL)
- return VCHIQ_ERROR;
ret = create_pagelist((char __user *)offset, size,
(dir == VCHIQ_BULK_RECEIVE)
diff --git a/sys/dev/ae/if_ae.c b/sys/dev/ae/if_ae.c
index e424e1bd0e76..adbb3e48a4e3 100644
--- a/sys/dev/ae/if_ae.c
+++ b/sys/dev/ae/if_ae.c
@@ -362,12 +362,6 @@ ae_attach(device_t dev)
*/
sc->tq = taskqueue_create_fast("ae_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->tq);
- if (sc->tq == NULL) {
- device_printf(dev, "could not create taskqueue.\n");
- ether_ifdetach(ifp);
- error = ENXIO;
- goto fail;
- }
taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
device_get_nameunit(sc->dev));
diff --git a/sys/dev/age/if_age.c b/sys/dev/age/if_age.c
index 6630f2cf782d..10f99129401a 100644
--- a/sys/dev/age/if_age.c
+++ b/sys/dev/age/if_age.c
@@ -628,12 +628,6 @@ age_attach(device_t dev)
/* Create local taskq. */
sc->age_tq = taskqueue_create_fast("age_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->age_tq);
- if (sc->age_tq == NULL) {
- device_printf(dev, "could not create taskqueue.\n");
- ether_ifdetach(ifp);
- error = ENXIO;
- goto fail;
- }
taskqueue_start_threads(&sc->age_tq, 1, PI_NET, "%s taskq",
device_get_nameunit(sc->age_dev));
diff --git a/sys/dev/al_eth/al_eth.c b/sys/dev/al_eth/al_eth.c
index b8dd95e7ca58..f4fec7c6aa94 100644
--- a/sys/dev/al_eth/al_eth.c
+++ b/sys/dev/al_eth/al_eth.c
@@ -1580,7 +1580,6 @@ al_eth_rx_recv_work(void *arg, int pending)
{
struct al_eth_ring *rx_ring = arg;
struct mbuf *mbuf;
- struct lro_entry *queued;
unsigned int qid = rx_ring->ring_id;
struct al_eth_pkt *hal_pkt = &rx_ring->hal_pkt;
uint16_t next_to_clean = rx_ring->next_to_clean;
@@ -1671,10 +1670,7 @@ al_eth_rx_recv_work(void *arg, int pending)
"%s: not filling rx queue %d\n", __func__, qid);
}
- while (((queued = LIST_FIRST(&rx_ring->lro.lro_active)) != NULL)) {
- LIST_REMOVE(queued, next);
- tcp_lro_flush(&rx_ring->lro, queued);
- }
+ tcp_lro_flush_all(&rx_ring->lro);
if (napi != 0) {
rx_ring->enqueue_is_running = 0;
@@ -2004,14 +2000,6 @@ al_eth_enable_msix(struct al_eth_adapter *adapter)
adapter->msix_entries = malloc(msix_vecs*sizeof(*adapter->msix_entries),
M_IFAL, M_ZERO | M_WAITOK);
-
- if (adapter->msix_entries == NULL) {
- device_printf_dbg(adapter->dev, "failed to allocate"
- " msix_entries %d\n", msix_vecs);
- rc = ENOMEM;
- goto exit;
- }
-
/* management vector (GROUP_A) @2*/
adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].entry = 2;
adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector = 0;
@@ -2299,9 +2287,6 @@ al_eth_setup_tx_resources(struct al_eth_adapter *adapter, int qid)
size = sizeof(struct al_eth_tx_buffer) * tx_ring->sw_count;
tx_ring->tx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
- if (tx_ring->tx_buffer_info == NULL)
- return (ENOMEM);
-
tx_ring->descs_size = tx_ring->hw_count * sizeof(union al_udma_desc);
q_params->size = tx_ring->hw_count;
@@ -2324,10 +2309,6 @@ al_eth_setup_tx_resources(struct al_eth_adapter *adapter, int qid)
mtx_init(&tx_ring->br_mtx, "AlRingMtx", NULL, MTX_DEF);
tx_ring->br = buf_ring_alloc(AL_BR_SIZE, M_DEVBUF, M_WAITOK,
&tx_ring->br_mtx);
- if (tx_ring->br == NULL) {
- device_printf(dev, "Critical Failure setting up buf ring\n");
- return (ENOMEM);
- }
/* Allocate taskqueues */
TASK_INIT(&tx_ring->enqueue_task, 0, al_eth_start_xmit, tx_ring);
@@ -2476,9 +2457,6 @@ al_eth_setup_rx_resources(struct al_eth_adapter *adapter, unsigned int qid)
size += 1;
rx_ring->rx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
- if (rx_ring->rx_buffer_info == NULL)
- return (ENOMEM);
-
rx_ring->descs_size = rx_ring->hw_count * sizeof(union al_udma_desc);
q_params->size = rx_ring->hw_count;
diff --git a/sys/dev/alc/if_alc.c b/sys/dev/alc/if_alc.c
index 859d1214b46a..07ba02c33c88 100644
--- a/sys/dev/alc/if_alc.c
+++ b/sys/dev/alc/if_alc.c
@@ -1639,12 +1639,6 @@ alc_attach(device_t dev)
/* Create local taskq. */
sc->alc_tq = taskqueue_create_fast("alc_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->alc_tq);
- if (sc->alc_tq == NULL) {
- device_printf(dev, "could not create taskqueue.\n");
- ether_ifdetach(ifp);
- error = ENXIO;
- goto fail;
- }
taskqueue_start_threads(&sc->alc_tq, 1, PI_NET, "%s taskq",
device_get_nameunit(sc->alc_dev));
diff --git a/sys/dev/ale/if_ale.c b/sys/dev/ale/if_ale.c
index 5b3ae438810c..e4d61e636f8b 100644
--- a/sys/dev/ale/if_ale.c
+++ b/sys/dev/ale/if_ale.c
@@ -655,12 +655,6 @@ ale_attach(device_t dev)
/* Create local taskq. */
sc->ale_tq = taskqueue_create_fast("ale_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->ale_tq);
- if (sc->ale_tq == NULL) {
- device_printf(dev, "could not create taskqueue.\n");
- ether_ifdetach(ifp);
- error = ENXIO;
- goto fail;
- }
taskqueue_start_threads(&sc->ale_tq, 1, PI_NET, "%s taskq",
device_get_nameunit(sc->ale_dev));
diff --git a/sys/dev/altera/msgdma/msgdma.c b/sys/dev/altera/msgdma/msgdma.c
index 8b983c5f61ef..a08a00ce0736 100644
--- a/sys/dev/altera/msgdma/msgdma.c
+++ b/sys/dev/altera/msgdma/msgdma.c
@@ -356,11 +356,6 @@ msgdma_desc_alloc(struct msgdma_softc *sc, struct msgdma_channel *chan,
/* Descriptors. */
chan->descs = malloc(nsegments * sizeof(struct msgdma_desc *),
M_DEVBUF, (M_WAITOK | M_ZERO));
- if (chan->descs == NULL) {
- device_printf(sc->dev,
- "%s: Can't allocate memory.\n", __func__);
- return (-1);
- }
chan->dma_map = malloc(nsegments * sizeof(bus_dmamap_t),
M_DEVBUF, (M_WAITOK | M_ZERO));
chan->descs_phys = malloc(nsegments * sizeof(bus_dma_segment_t),
diff --git a/sys/dev/ath/if_ath_lna_div.c b/sys/dev/ath/if_ath_lna_div.c
index 1b20591fc64e..0755bb667716 100644
--- a/sys/dev/ath/if_ath_lna_div.c
+++ b/sys/dev/ath/if_ath_lna_div.c
@@ -96,12 +96,6 @@ ath_lna_div_attach(struct ath_softc *sc)
ss = malloc(sizeof(struct if_ath_ant_comb_state),
M_TEMP, M_WAITOK | M_ZERO);
- if (ss == NULL) {
- device_printf(sc->sc_dev, "%s: failed to allocate\n",
- __func__);
- /* Don't fail at this point */
- return (0);
- }
/* Fetch the hardware configuration */
OS_MEMZERO(&div_ant_conf, sizeof(div_ant_conf));
diff --git a/sys/dev/ath/if_ath_pci.c b/sys/dev/ath/if_ath_pci.c
index 72f0a802aa5f..a242eab7a694 100644
--- a/sys/dev/ath/if_ath_pci.c
+++ b/sys/dev/ath/if_ath_pci.c
@@ -269,11 +269,6 @@ ath_pci_attach(device_t dev)
__func__, fw->data);
sc->sc_eepromdata =
malloc(fw->datasize, M_TEMP, M_WAITOK | M_ZERO);
- if (! sc->sc_eepromdata) {
- device_printf(dev, "%s: can't malloc eepromdata\n",
- __func__);
- goto bad4;
- }
memcpy(sc->sc_eepromdata, fw->data, fw->datasize);
firmware_put(fw, 0);
}
diff --git a/sys/dev/ath/if_ath_spectral.c b/sys/dev/ath/if_ath_spectral.c
index 58f21b526e93..951d66605981 100644
--- a/sys/dev/ath/if_ath_spectral.c
+++ b/sys/dev/ath/if_ath_spectral.c
@@ -112,13 +112,6 @@ ath_spectral_attach(struct ath_softc *sc)
ss = malloc(sizeof(struct ath_spectral_state),
M_TEMP, M_WAITOK | M_ZERO);
-
- if (ss == NULL) {
- device_printf(sc->sc_dev, "%s: failed to alloc memory\n",
- __func__);
- return (-ENOMEM);
- }
-
sc->sc_spectral = ss;
(void) ath_hal_spectral_get_config(sc->sc_ah, &ss->spectral_state);
diff --git a/sys/dev/axgbe/if_axgbe_pci.c b/sys/dev/axgbe/if_axgbe_pci.c
index 320799e77188..d3078a1c33c1 100644
--- a/sys/dev/axgbe/if_axgbe_pci.c
+++ b/sys/dev/axgbe/if_axgbe_pci.c
@@ -561,11 +561,6 @@ axgbe_if_attach_pre(if_ctx_t ctx)
/* create the workqueue */
pdata->dev_workqueue = taskqueue_create("axgbe", M_WAITOK,
taskqueue_thread_enqueue, &pdata->dev_workqueue);
- if (pdata->dev_workqueue == NULL) {
- axgbe_error("Unable to allocate workqueue\n");
- ret = ENOMEM;
- goto free_channels;
- }
ret = taskqueue_start_threads(&pdata->dev_workqueue, 1, PI_NET,
"axgbe dev taskq");
if (ret) {
@@ -581,8 +576,6 @@ axgbe_if_attach_pre(if_ctx_t ctx)
free_task_queue:
taskqueue_free(pdata->dev_workqueue);
-
-free_channels:
axgbe_free_channels(sc);
release_bus_resource:
diff --git a/sys/dev/axgbe/xgbe-phy-v2.c b/sys/dev/axgbe/xgbe-phy-v2.c
index 5b39d61694e6..8c6069f83076 100644
--- a/sys/dev/axgbe/xgbe-phy-v2.c
+++ b/sys/dev/axgbe/xgbe-phy-v2.c
@@ -3771,8 +3771,6 @@ xgbe_phy_init(struct xgbe_prv_data *pdata)
return (ret);
phy_data = malloc(sizeof(*phy_data), M_AXGBE, M_WAITOK | M_ZERO);
- if (!phy_data)
- return (-ENOMEM);
pdata->phy_data = phy_data;
phy_data->port_mode = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_MODE);
diff --git a/sys/dev/bge/if_bge.c b/sys/dev/bge/if_bge.c
index 23259179cc62..6c3301b1473a 100644
--- a/sys/dev/bge/if_bge.c
+++ b/sys/dev/bge/if_bge.c
@@ -3890,12 +3890,6 @@ bge_attach(device_t dev)
~BGE_MSIMODE_ONE_SHOT_DISABLE);
sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->bge_tq);
- if (sc->bge_tq == NULL) {
- device_printf(dev, "could not create taskqueue.\n");
- ether_ifdetach(ifp);
- error = ENOMEM;
- goto fail;
- }
error = taskqueue_start_threads(&sc->bge_tq, 1, PI_NET,
"%s taskq", device_get_nameunit(sc->bge_dev));
if (error != 0) {
diff --git a/sys/dev/bnxt/bnxt_en/bnxt_mgmt.c b/sys/dev/bnxt/bnxt_en/bnxt_mgmt.c
index c188fd4e269d..d200ea5ce9f2 100644
--- a/sys/dev/bnxt/bnxt_en/bnxt_mgmt.c
+++ b/sys/dev/bnxt/bnxt_en/bnxt_mgmt.c
@@ -205,19 +205,7 @@ bnxt_mgmt_process_hwrm(struct cdev *dev, u_long cmd, caddr_t data,
}
req = malloc(msg_temp.len_req, M_BNXT, M_WAITOK | M_ZERO);
- if(!req) {
- device_printf(softc->dev, "%s:%d Memory allocation failed",
- __FUNCTION__, __LINE__);
- return -ENOMEM;
- }
-
resp = malloc(msg_temp.len_resp, M_BNXT, M_WAITOK | M_ZERO);
- if(!resp) {
- device_printf(softc->dev, "%s:%d Memory allocation failed",
- __FUNCTION__, __LINE__);
- ret = -ENOMEM;
- goto end;
- }
if (copyin((void * __capability)msg_temp.usr_req, req, msg_temp.len_req)) {
device_printf(softc->dev, "%s:%d Failed to copy data from user\n",
@@ -237,12 +225,6 @@ bnxt_mgmt_process_hwrm(struct cdev *dev, u_long cmd, caddr_t data,
(num_ind * sizeof(struct dma_info));
msg2 = malloc(size, M_BNXT, M_WAITOK | M_ZERO);
- if(!msg2) {
- device_printf(softc->dev, "%s:%d Memory allocation failed",
- __FUNCTION__, __LINE__);
- ret = -ENOMEM;
- goto end;
- }
if (copyin((void * __capability)mgmt_req.req.hreq, msg2, size)) {
device_printf(softc->dev, "%s:%d Failed to copy"
diff --git a/sys/dev/bxe/bxe.c b/sys/dev/bxe/bxe.c
index 26a87354e5b1..3e7120a42a90 100644
--- a/sys/dev/bxe/bxe.c
+++ b/sys/dev/bxe/bxe.c
@@ -12174,7 +12174,7 @@ bxe_set_rx_mode(struct bxe_softc *sc)
if (if_getflags(ifp) & IFF_PROMISC) {
rx_mode = BXE_RX_MODE_PROMISC;
} else if ((if_getflags(ifp) & IFF_ALLMULTI) ||
- ((if_getamcount(ifp) > BXE_MAX_MULTICAST) &&
+ (if_llmaddr_count(ifp) > BXE_MAX_MULTICAST &&
CHIP_IS_E1(sc))) {
rx_mode = BXE_RX_MODE_ALLMULTI;
} else {
diff --git a/sys/dev/cas/if_cas.c b/sys/dev/cas/if_cas.c
index 76d1b713e5bb..1f684097bd3a 100644
--- a/sys/dev/cas/if_cas.c
+++ b/sys/dev/cas/if_cas.c
@@ -205,11 +205,6 @@ cas_attach(struct cas_softc *sc)
TASK_INIT(&sc->sc_tx_task, 1, cas_tx_task, ifp);
sc->sc_tq = taskqueue_create_fast("cas_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->sc_tq);
- if (sc->sc_tq == NULL) {
- device_printf(sc->sc_dev, "could not create taskqueue\n");
- error = ENXIO;
- goto fail_ifnet;
- }
error = taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
device_get_nameunit(sc->sc_dev));
if (error != 0) {
@@ -462,7 +457,6 @@ cas_attach(struct cas_softc *sc)
bus_dma_tag_destroy(sc->sc_pdmatag);
fail_taskq:
taskqueue_free(sc->sc_tq);
- fail_ifnet:
if_free(ifp);
return (error);
}
diff --git a/sys/dev/cxgb/cxgb_main.c b/sys/dev/cxgb/cxgb_main.c
index 5bc1fa3a0621..2638990cf223 100644
--- a/sys/dev/cxgb/cxgb_main.c
+++ b/sys/dev/cxgb/cxgb_main.c
@@ -2472,9 +2472,7 @@ set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
aligned_len = (len + (offset & 3) + 3) & ~3;
if (aligned_offset != offset || aligned_len != len) {
- buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);
- if (!buf)
- return (ENOMEM);
+ buf = malloc(aligned_len, M_DEVBUF, M_WAITOK | M_ZERO);
err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
if (!err && aligned_len > 4)
err = t3_seeprom_read(adapter,
diff --git a/sys/dev/cxgb/cxgb_sge.c b/sys/dev/cxgb/cxgb_sge.c
index 1b82f2ebcaae..52ffa5cdaffa 100644
--- a/sys/dev/cxgb/cxgb_sge.c
+++ b/sys/dev/cxgb/cxgb_sge.c
@@ -2419,11 +2419,8 @@ t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
q->port = pi;
q->adap = sc;
- if ((q->txq[TXQ_ETH].txq_mr = buf_ring_alloc(cxgb_txq_buf_ring_size,
- M_DEVBUF, M_WAITOK, &q->lock)) == NULL) {
- device_printf(sc->dev, "failed to allocate mbuf ring\n");
- goto err;
- }
+ q->txq[TXQ_ETH].txq_mr = buf_ring_alloc(cxgb_txq_buf_ring_size,
+ M_DEVBUF, M_WAITOK, &q->lock);
if ((q->txq[TXQ_ETH].txq_ifq = malloc(sizeof(struct ifaltq), M_DEVBUF,
M_NOWAIT | M_ZERO)) == NULL) {
device_printf(sc->dev, "failed to allocate ifq\n");
diff --git a/sys/dev/cxgbe/cxgbei/cxgbei.c b/sys/dev/cxgbe/cxgbei/cxgbei.c
index 04454a98e247..ccca45f5f761 100644
--- a/sys/dev/cxgbe/cxgbei/cxgbei.c
+++ b/sys/dev/cxgbe/cxgbei/cxgbei.c
@@ -842,9 +842,6 @@ cxgbei_activate(struct adapter *sc)
/* per-adapter softc for iSCSI */
ci = malloc(sizeof(*ci), M_CXGBE, M_ZERO | M_WAITOK);
- if (ci == NULL)
- return (ENOMEM);
-
rc = cxgbei_init(sc, ci);
if (rc != 0) {
free(ci, M_CXGBE);
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index c3fc65f5e235..99176e3a137b 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -1920,6 +1920,9 @@ t4_detach_common(device_t dev)
static inline int
stop_adapter(struct adapter *sc)
{
+ struct port_info *pi;
+ int i;
+
if (atomic_testandset_int(&sc->error_flags, ilog2(ADAP_STOPPED))) {
CH_ALERT(sc, "%s from %p, flags 0x%08x,0x%08x, EALREADY\n",
__func__, curthread, sc->flags, sc->error_flags);
@@ -1927,7 +1930,24 @@ stop_adapter(struct adapter *sc)
}
CH_ALERT(sc, "%s from %p, flags 0x%08x,0x%08x\n", __func__, curthread,
sc->flags, sc->error_flags);
- return (t4_shutdown_adapter(sc));
+ t4_shutdown_adapter(sc);
+ for_each_port(sc, i) {
+ pi = sc->port[i];
+ PORT_LOCK(pi);
+ if (pi->up_vis > 0 && pi->link_cfg.link_ok) {
+ /*
+ * t4_shutdown_adapter has already shut down all the
+ * PHYs but it also disables interrupts and DMA so there
+ * won't be a link interrupt. Update the state manually
+ * if the link was up previously and inform the kernel.
+ */
+ pi->link_cfg.link_ok = false;
+ t4_os_link_changed(pi);
+ }
+ PORT_UNLOCK(pi);
+ }
+
+ return (0);
}
static inline int
@@ -2020,20 +2040,6 @@ stop_lld(struct adapter *sc)
for_each_port(sc, i) {
pi = sc->port[i];
pi->vxlan_tcam_entry = false;
-
- PORT_LOCK(pi);
- if (pi->up_vis > 0) {
- /*
- * t4_shutdown_adapter has already shut down all the
- * PHYs but it also disables interrupts and DMA so there
- * won't be a link interrupt. So we update the state
- * manually and inform the kernel.
- */
- pi->link_cfg.link_ok = false;
- t4_os_link_changed(pi);
- }
- PORT_UNLOCK(pi);
-
for_each_vi(pi, j, vi) {
vi->xact_addr_filt = -1;
mtx_lock(&vi->tick_mtx);
diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c
index def0d2573311..5f3b06fda150 100644
--- a/sys/dev/cxgbe/tom/t4_cpl_io.c
+++ b/sys/dev/cxgbe/tom/t4_cpl_io.c
@@ -2128,11 +2128,6 @@ alloc_aiotx_mbuf(struct kaiocb *job, int len)
break;
m = mb_alloc_ext_pgs(M_WAITOK, aiotx_free_pgs);
- if (m == NULL) {
- vm_page_unhold_pages(pgs, npages);
- break;
- }
-
m->m_epg_1st_off = pgoff;
m->m_epg_npgs = npages;
if (npages == 1) {
diff --git a/sys/dev/cxgbe/tom/t4_listen.c b/sys/dev/cxgbe/tom/t4_listen.c
index f91d193c0fed..8a39218a55ba 100644
--- a/sys/dev/cxgbe/tom/t4_listen.c
+++ b/sys/dev/cxgbe/tom/t4_listen.c
@@ -1494,18 +1494,20 @@ do_pass_accept_req(struct sge_iq *iq, const struct rss_header *rss,
synqe->tid = tid;
synqe->syn = m;
m = NULL;
+ mtx_lock(&td->toep_list_lock);
+ TAILQ_INSERT_TAIL(&td->synqe_list, synqe, link);
+ mtx_unlock(&td->toep_list_lock);
if (send_synack(sc, synqe, opt0, opt2, tid) != 0) {
remove_tid(sc, tid, ntids);
m = synqe->syn;
synqe->syn = NULL;
+ mtx_lock(&td->toep_list_lock);
+ TAILQ_REMOVE(&td->synqe_list, synqe, link);
+ mtx_unlock(&td->toep_list_lock);
NET_EPOCH_EXIT(et);
REJECT_PASS_ACCEPT_REQ(true);
}
-
- mtx_lock(&td->toep_list_lock);
- TAILQ_INSERT_TAIL(&td->synqe_list, synqe, link);
- mtx_unlock(&td->toep_list_lock);
CTR6(KTR_CXGBE,
"%s: stid %u, tid %u, synqe %p, opt0 %#016lx, opt2 %#08x",
__func__, stid, tid, synqe, be64toh(opt0), be32toh(opt2));
diff --git a/sys/dev/dpaa2/dpaa2_channel.c b/sys/dev/dpaa2/dpaa2_channel.c
index 87b76923a16d..654c6f2baf70 100644
--- a/sys/dev/dpaa2/dpaa2_channel.c
+++ b/sys/dev/dpaa2/dpaa2_channel.c
@@ -146,12 +146,6 @@ dpaa2_chan_setup(device_t dev, device_t iodev, device_t condev, device_t bpdev,
}
ch = malloc(sizeof(struct dpaa2_channel), M_DPAA2_CH, M_WAITOK | M_ZERO);
- if (ch == NULL) {
- device_printf(dev, "%s: malloc() failed\n", __func__);
- error = ENOMEM;
- goto fail_malloc;
- }
-
ch->ni_dev = dev;
ch->io_dev = iodev;
ch->con_dev = condev;
@@ -281,7 +275,6 @@ dpaa2_chan_setup(device_t dev, device_t iodev, device_t condev, device_t bpdev,
/* taskqueue_drain(ch->cleanup_tq, &ch->cleanup_task); */
/* } */
/* taskqueue_free(ch->cleanup_tq); */
-fail_malloc:
(void)DPAA2_CMD_CON_DISABLE(dev, child, DPAA2_CMD_TK(&cmd, contk));
fail_con_enable:
(void)DPAA2_CMD_CON_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, contk));
diff --git a/sys/dev/dpaa2/dpaa2_mc.c b/sys/dev/dpaa2/dpaa2_mc.c
index 66867a18068c..da8f8a077d6b 100644
--- a/sys/dev/dpaa2/dpaa2_mc.c
+++ b/sys/dev/dpaa2/dpaa2_mc.c
@@ -462,8 +462,6 @@ dpaa2_mc_manage_dev(device_t mcdev, device_t dpaa2_dev, uint32_t flags)
return (EINVAL);
di = malloc(sizeof(*di), M_DPAA2_MC, M_WAITOK | M_ZERO);
- if (!di)
- return (ENOMEM);
di->dpaa2_dev = dpaa2_dev;
di->flags = flags;
di->owners = 0;
diff --git a/sys/dev/dpaa2/dpaa2_ni.c b/sys/dev/dpaa2/dpaa2_ni.c
index a21351a20b49..6ed656849709 100644
--- a/sys/dev/dpaa2/dpaa2_ni.c
+++ b/sys/dev/dpaa2/dpaa2_ni.c
@@ -588,11 +588,6 @@ dpaa2_ni_attach(device_t dev)
/* Create a taskqueue thread to release new buffers to the pool. */
sc->bp_taskq = taskqueue_create(tq_name, M_WAITOK,
taskqueue_thread_enqueue, &sc->bp_taskq);
- if (sc->bp_taskq == NULL) {
- device_printf(dev, "%s: failed to allocate task queue: %s\n",
- __func__, tq_name);
- goto close_ni;
- }
taskqueue_start_threads(&sc->bp_taskq, 1, PI_NET, "%s", tq_name);
/* sc->cleanup_taskq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK, */
@@ -1339,21 +1334,11 @@ dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_ni_fq *fq)
for (uint64_t j = 0; j < DPAA2_NI_BUFS_PER_TX; j++) {
buf = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
M_WAITOK);
- if (buf == NULL) {
- device_printf(dev, "%s: malloc() failed (buf)\n",
- __func__);
- return (ENOMEM);
- }
/* Keep DMA tag and Tx ring linked to the buffer */
DPAA2_BUF_INIT_TAGOPT(buf, ch->tx_dmat, tx);
buf->sgt = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
M_WAITOK);
- if (buf->sgt == NULL) {
- device_printf(dev, "%s: malloc() failed (sgt)\n",
- __func__);
- return (ENOMEM);
- }
/* Link SGT to DMA tag and back to its Tx buffer */
DPAA2_BUF_INIT_TAGOPT(buf->sgt, ch->sgt_dmat, buf);
diff --git a/sys/dev/drm2/drm_buffer.c b/sys/dev/drm2/drm_buffer.c
index 8a674397262e..8069f2c8c4c6 100644
--- a/sys/dev/drm2/drm_buffer.c
+++ b/sys/dev/drm2/drm_buffer.c
@@ -50,45 +50,15 @@ int drm_buffer_alloc(struct drm_buffer **buf, int size)
* variable sized */
*buf = malloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *),
DRM_MEM_DRIVER, M_ZERO | M_WAITOK);
-
- if (*buf == NULL) {
- DRM_ERROR("Failed to allocate drm buffer object to hold"
- " %d bytes in %d pages.\n",
- size, nr_pages);
- return -ENOMEM;
- }
-
(*buf)->size = size;
for (idx = 0; idx < nr_pages; ++idx) {
-
(*buf)->data[idx] =
malloc(min(PAGE_SIZE, size - idx * PAGE_SIZE),
DRM_MEM_DRIVER, M_WAITOK);
-
-
- if ((*buf)->data[idx] == NULL) {
- DRM_ERROR("Failed to allocate %dth page for drm"
- " buffer with %d bytes and %d pages.\n",
- idx + 1, size, nr_pages);
- goto error_out;
- }
-
}
return 0;
-
-error_out:
-
- /* Only last element can be null pointer so check for it first. */
- if ((*buf)->data[idx])
- free((*buf)->data[idx], DRM_MEM_DRIVER);
-
- for (--idx; idx >= 0; --idx)
- free((*buf)->data[idx], DRM_MEM_DRIVER);
-
- free(*buf, DRM_MEM_DRIVER);
- return -ENOMEM;
}
EXPORT_SYMBOL(drm_buffer_alloc);
diff --git a/sys/dev/drm2/drm_crtc.c b/sys/dev/drm2/drm_crtc.c
index b9415082e7a1..a163c7455773 100644
--- a/sys/dev/drm2/drm_crtc.c
+++ b/sys/dev/drm2/drm_crtc.c
@@ -662,13 +662,6 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
plane->funcs = funcs;
plane->format_types = malloc(sizeof(uint32_t) * format_count,
DRM_MEM_KMS, M_WAITOK);
- if (!plane->format_types) {
- DRM_DEBUG_KMS("out of memory when allocating plane\n");
- drm_mode_object_put(dev, &plane->base);
- ret = -ENOMEM;
- goto out;
- }
-
memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
plane->format_count = format_count;
plane->possible_crtcs = possible_crtcs;
@@ -725,8 +718,6 @@ struct drm_display_mode *drm_mode_create(struct drm_device *dev)
nmode = malloc(sizeof(struct drm_display_mode), DRM_MEM_KMS,
M_WAITOK | M_ZERO);
- if (!nmode)
- return NULL;
if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
free(nmode, DRM_MEM_KMS);
@@ -1009,9 +1000,6 @@ int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
group->id_list = malloc(total_objects * sizeof(uint32_t),
DRM_MEM_KMS, M_WAITOK | M_ZERO);
- if (!group->id_list)
- return -ENOMEM;
-
group->num_crtcs = 0;
group->num_connectors = 0;
group->num_encoders = 0;
@@ -1997,10 +1985,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
connector_set = malloc(crtc_req->count_connectors *
sizeof(struct drm_connector *),
DRM_MEM_KMS, M_WAITOK);
- if (!connector_set) {
- ret = -ENOMEM;
- goto out;
- }
for (i = 0; i < crtc_req->count_connectors; i++) {
set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
@@ -2522,11 +2506,6 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
}
clips = malloc(num_clips * sizeof(*clips), DRM_MEM_KMS,
M_WAITOK | M_ZERO);
- if (!clips) {
- ret = -ENOMEM;
- goto out_err1;
- }
-
ret = copy_from_user(clips, clips_ptr,
num_clips * sizeof(*clips));
if (ret) {
@@ -2773,15 +2752,10 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
property = malloc(sizeof(struct drm_property), DRM_MEM_KMS,
M_WAITOK | M_ZERO);
- if (!property)
- return NULL;
- if (num_values) {
+ if (num_values)
property->values = malloc(sizeof(uint64_t)*num_values, DRM_MEM_KMS,
M_WAITOK | M_ZERO);
- if (!property->values)
- goto fail;
- }
ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
if (ret)
@@ -2907,9 +2881,6 @@ int drm_property_add_enum(struct drm_property *property, int index,
prop_enum = malloc(sizeof(struct drm_property_enum), DRM_MEM_KMS,
M_WAITOK | M_ZERO);
- if (!prop_enum)
- return -ENOMEM;
-
strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
prop_enum->value = value;
@@ -3103,9 +3074,6 @@ static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev
blob = malloc(sizeof(struct drm_property_blob)+length, DRM_MEM_KMS,
M_WAITOK | M_ZERO);
- if (!blob)
- return NULL;
-
ret = drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
if (ret) {
free(blob, DRM_MEM_KMS);
@@ -3433,10 +3401,6 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
crtc->gamma_store = malloc(gamma_size * sizeof(uint16_t) * 3,
DRM_MEM_KMS, M_WAITOK | M_ZERO);
- if (!crtc->gamma_store) {
- crtc->gamma_size = 0;
- return -ENOMEM;
- }
return 0;
}
@@ -3631,13 +3595,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
mtx_unlock(&dev->event_lock);
e = malloc(sizeof *e, DRM_MEM_KMS, M_WAITOK | M_ZERO);
- if (e == NULL) {
- mtx_lock(&dev->event_lock);
- file_priv->event_space += sizeof e->event;
- mtx_unlock(&dev->event_lock);
- goto out;
- }
-
e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
e->event.base.length = sizeof e->event;
e->event.user_data = page_flip->user_data;
diff --git a/sys/dev/drm2/ttm/ttm_object.c b/sys/dev/drm2/ttm/ttm_object.c
index 8c373618d7ac..31af15cf4c56 100644
--- a/sys/dev/drm2/ttm/ttm_object.c
+++ b/sys/dev/drm2/ttm/ttm_object.c
@@ -282,11 +282,6 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
if (unlikely(ret != 0))
return ret;
ref = malloc(sizeof(*ref), M_TTM_OBJ_REF, M_WAITOK);
- if (unlikely(ref == NULL)) {
- ttm_mem_global_free(mem_glob, sizeof(*ref));
- return -ENOMEM;
- }
-
ref->hash.key = base->hash.key;
ref->obj = base;
ref->tfile = tfile;
diff --git a/sys/dev/ena/ena_rss.c b/sys/dev/ena/ena_rss.c
index d90a7fbb253a..41fa9c62f94a 100644
--- a/sys/dev/ena/ena_rss.c
+++ b/sys/dev/ena/ena_rss.c
@@ -279,12 +279,9 @@ ena_rss_indir_init(struct ena_adapter *adapter)
struct ena_indir *indir = adapter->rss_indir;
int rc;
- if (indir == NULL) {
+ if (indir == NULL)
adapter->rss_indir = indir = malloc(sizeof(struct ena_indir),
M_DEVBUF, M_WAITOK | M_ZERO);
- if (indir == NULL)
- return (ENOMEM);
- }
rc = ena_rss_indir_get(adapter, indir->table);
if (rc != 0) {
diff --git a/sys/dev/etherswitch/infineon/adm6996fc.c b/sys/dev/etherswitch/infineon/adm6996fc.c
index 2c6c83a4388d..64f61df93db1 100644
--- a/sys/dev/etherswitch/infineon/adm6996fc.c
+++ b/sys/dev/etherswitch/infineon/adm6996fc.c
@@ -179,10 +179,6 @@ adm6996fc_attach_phys(struct adm6996fc_softc *sc)
if_initname(sc->ifp[port], name, port);
sc->miibus[port] = malloc(sizeof(device_t), M_ADM6996FC,
M_WAITOK | M_ZERO);
- if (sc->miibus[port] == NULL) {
- err = ENOMEM;
- goto failed;
- }
err = mii_attach(sc->sc_dev, sc->miibus[port], sc->ifp[port],
adm6996fc_ifmedia_upd, adm6996fc_ifmedia_sts, \
BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
@@ -255,12 +251,6 @@ adm6996fc_attach(device_t dev)
sc->portphy = malloc(sizeof(int) * sc->numports, M_ADM6996FC,
M_WAITOK | M_ZERO);
- if (sc->ifp == NULL || sc->ifname == NULL || sc->miibus == NULL ||
- sc->portphy == NULL) {
- err = ENOMEM;
- goto failed;
- }
-
/*
* Attach the PHYs and complete the bus enumeration.
*/
@@ -281,14 +271,10 @@ adm6996fc_attach(device_t dev)
return (0);
failed:
- if (sc->portphy != NULL)
- free(sc->portphy, M_ADM6996FC);
- if (sc->miibus != NULL)
- free(sc->miibus, M_ADM6996FC);
- if (sc->ifname != NULL)
- free(sc->ifname, M_ADM6996FC);
- if (sc->ifp != NULL)
- free(sc->ifp, M_ADM6996FC);
+ free(sc->portphy, M_ADM6996FC);
+ free(sc->miibus, M_ADM6996FC);
+ free(sc->ifname, M_ADM6996FC);
+ free(sc->ifp, M_ADM6996FC);
return (err);
}
diff --git a/sys/dev/etherswitch/micrel/ksz8995ma.c b/sys/dev/etherswitch/micrel/ksz8995ma.c
index e512a86202c6..ccd7dbffa9e9 100644
--- a/sys/dev/etherswitch/micrel/ksz8995ma.c
+++ b/sys/dev/etherswitch/micrel/ksz8995ma.c
@@ -225,10 +225,6 @@ ksz8995ma_attach_phys(struct ksz8995ma_softc *sc)
if_initname(sc->ifp[port], name, port);
sc->miibus[port] = malloc(sizeof(device_t), M_KSZ8995MA,
M_WAITOK | M_ZERO);
- if (sc->miibus[port] == NULL) {
- err = ENOMEM;
- goto failed;
- }
err = mii_attach(sc->sc_dev, sc->miibus[port], sc->ifp[port],
ksz8995ma_ifmedia_upd, ksz8995ma_ifmedia_sts, \
BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
@@ -305,12 +301,6 @@ ksz8995ma_attach(device_t dev)
sc->portphy = malloc(sizeof(int) * sc->numports, M_KSZ8995MA,
M_WAITOK | M_ZERO);
- if (sc->ifp == NULL || sc->ifname == NULL || sc->miibus == NULL ||
- sc->portphy == NULL) {
- err = ENOMEM;
- goto failed;
- }
-
/*
* Attach the PHYs and complete the bus enumeration.
*/
@@ -339,14 +329,10 @@ ksz8995ma_attach(device_t dev)
return (0);
failed:
- if (sc->portphy != NULL)
- free(sc->portphy, M_KSZ8995MA);
- if (sc->miibus != NULL)
- free(sc->miibus, M_KSZ8995MA);
- if (sc->ifname != NULL)
- free(sc->ifname, M_KSZ8995MA);
- if (sc->ifp != NULL)
- free(sc->ifp, M_KSZ8995MA);
+ free(sc->portphy, M_KSZ8995MA);
+ free(sc->miibus, M_KSZ8995MA);
+ free(sc->ifname, M_KSZ8995MA);
+ free(sc->ifp, M_KSZ8995MA);
return (err);
}
diff --git a/sys/dev/firewire/if_fwip.c b/sys/dev/firewire/if_fwip.c
index 6350ec9cb56e..41143e2e59d4 100644
--- a/sys/dev/firewire/if_fwip.c
+++ b/sys/dev/firewire/if_fwip.c
@@ -304,13 +304,9 @@ fwip_init(void *arg)
xferq->psize = MCLBYTES;
xferq->queued = 0;
xferq->buf = NULL;
- xferq->bulkxfer = (struct fw_bulkxfer *) malloc(
+ xferq->bulkxfer = malloc(
sizeof(struct fw_bulkxfer) * xferq->bnchunk,
M_FWIP, M_WAITOK);
- if (xferq->bulkxfer == NULL) {
- printf("if_fwip: malloc failed\n");
- return;
- }
STAILQ_INIT(&xferq->stvalid);
STAILQ_INIT(&xferq->stfree);
STAILQ_INIT(&xferq->stdma);
diff --git a/sys/dev/flash/flexspi/flex_spi.c b/sys/dev/flash/flexspi/flex_spi.c
index 766a1cfaa332..9382b237ee71 100644
--- a/sys/dev/flash/flexspi/flex_spi.c
+++ b/sys/dev/flash/flexspi/flex_spi.c
@@ -781,12 +781,6 @@ flex_spi_attach(device_t dev)
}
sc->buf = malloc(sc->erasesize, SECTOR_BUFFER, M_WAITOK);
- if (sc->buf == NULL) {
- device_printf(sc->dev, "Unable to set up allocate internal buffer\n");
- flex_spi_detach(dev);
- return (ENOMEM);
- }
-
/* Move it to per-flash */
sc->disk = disk_alloc();
sc->disk->d_open = flex_spi_open;
diff --git a/sys/dev/hpt27xx/hpt27xx_osm_bsd.c b/sys/dev/hpt27xx/hpt27xx_osm_bsd.c
index 225c91b44f21..e086a1554940 100644
--- a/sys/dev/hpt27xx/hpt27xx_osm_bsd.c
+++ b/sys/dev/hpt27xx/hpt27xx_osm_bsd.c
@@ -94,9 +94,6 @@ static int hpt_attach(device_t dev)
size = him->get_adapter_size(&pci_id);
hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK);
- if (!hba->ldm_adapter.him_handle)
- return ENXIO;
-
hba->pcidev = dev;
hba->pciaddr.tree = 0;
hba->pciaddr.bus = pci_get_bus(dev);
@@ -114,10 +111,6 @@ static int hpt_attach(device_t dev)
if (!ldm_register_adapter(&hba->ldm_adapter)) {
size = ldm_get_vbus_size();
vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK);
- if (!vbus_ext) {
- free(hba->ldm_adapter.him_handle, M_DEVBUF);
- return ENXIO;
- }
memset(vbus_ext, 0, sizeof(VBUS_EXT));
vbus_ext->ext_type = EXT_TYPE_VBUS;
ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext);
@@ -168,7 +161,6 @@ static int hpt_alloc_mem(PVBUS_EXT vbus_ext)
f->tag, f->count, f->size, f->count*f->size));
for (i=0; icount; i++) {
p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK);
- if (!p) return (ENXIO);
*p = f->head;
f->head = p;
}
@@ -1109,10 +1101,6 @@ static void hpt_final_init(void *dummy)
for (i=0; ivbus_ext = vbus_ext;
ext->next = vbus_ext->cmdext_list;
vbus_ext->cmdext_list = ext;
@@ -1327,18 +1315,13 @@ static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, stru
if (ioctl_args.nInBufferSize) {
ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK);
- if (!ioctl_args.lpInBuffer)
- goto invalid;
if (copyin((void*)piop->lpInBuffer,
ioctl_args.lpInBuffer, piop->nInBufferSize))
goto invalid;
}
- if (ioctl_args.nOutBufferSize) {
+ if (ioctl_args.nOutBufferSize)
ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK | M_ZERO);
- if (!ioctl_args.lpOutBuffer)
- goto invalid;
- }
hpt_do_ioctl(&ioctl_args);
diff --git a/sys/dev/hptnr/hptnr_osm_bsd.c b/sys/dev/hptnr/hptnr_osm_bsd.c
index a8ac77c5ad5a..7426873964fb 100644
--- a/sys/dev/hptnr/hptnr_osm_bsd.c
+++ b/sys/dev/hptnr/hptnr_osm_bsd.c
@@ -165,7 +165,6 @@ static int hpt_alloc_mem(PVBUS_EXT vbus_ext)
f->tag, f->count, f->size, f->count*f->size));
for (i=0; icount; i++) {
p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK);
- if (!p) return (ENXIO);
*p = f->head;
f->head = p;
}
@@ -1389,10 +1388,6 @@ static void hpt_final_init(void *dummy)
for (i=0; ivbus_ext = vbus_ext;
ext->next = vbus_ext->cmdext_list;
vbus_ext->cmdext_list = ext;
@@ -1610,19 +1605,14 @@ static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, stru
if (ioctl_args.nInBufferSize) {
ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK);
- if (!ioctl_args.lpInBuffer)
- goto invalid;
if (copyin((void*)piop->lpInBuffer,
ioctl_args.lpInBuffer, piop->nInBufferSize))
goto invalid;
}
- if (ioctl_args.nOutBufferSize) {
+ if (ioctl_args.nOutBufferSize)
ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK | M_ZERO);
- if (!ioctl_args.lpOutBuffer)
- goto invalid;
- }
-
+
hpt_do_ioctl(&ioctl_args);
if (ioctl_args.result==HPT_IOCTL_RESULT_OK) {
diff --git a/sys/dev/hptrr/hptrr_osm_bsd.c b/sys/dev/hptrr/hptrr_osm_bsd.c
index 68e9af3aff02..42866c1d4297 100644
--- a/sys/dev/hptrr/hptrr_osm_bsd.c
+++ b/sys/dev/hptrr/hptrr_osm_bsd.c
@@ -1032,10 +1032,6 @@ static void hpt_final_init(void *dummy)
for (i=0; ivbus_ext = vbus_ext;
ext->next = vbus_ext->cmdext_list;
vbus_ext->cmdext_list = ext;
@@ -1252,19 +1248,14 @@ static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, stru
if (ioctl_args.nInBufferSize) {
ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK);
- if (!ioctl_args.lpInBuffer)
- goto invalid;
if (copyin((void*)piop->lpInBuffer,
ioctl_args.lpInBuffer, piop->nInBufferSize))
goto invalid;
}
- if (ioctl_args.nOutBufferSize) {
+ if (ioctl_args.nOutBufferSize)
ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK | M_ZERO);
- if (!ioctl_args.lpOutBuffer)
- goto invalid;
- }
-
+
hpt_do_ioctl(&ioctl_args);
if (ioctl_args.result==HPT_IOCTL_RESULT_OK) {
diff --git a/sys/dev/ice/ice_lib.c b/sys/dev/ice/ice_lib.c
index 659412450fce..7077859cc877 100644
--- a/sys/dev/ice/ice_lib.c
+++ b/sys/dev/ice/ice_lib.c
@@ -426,31 +426,21 @@ ice_setup_pf_vsi(struct ice_softc *sc)
* all queues for this VSI are not yet assigned an index and thus,
* not ready for use.
*
- * Returns an error code on failure.
*/
-int
+void
ice_alloc_vsi_qmap(struct ice_vsi *vsi, const int max_tx_queues,
const int max_rx_queues)
{
- struct ice_softc *sc = vsi->sc;
int i;
MPASS(max_tx_queues > 0);
MPASS(max_rx_queues > 0);
/* Allocate Tx queue mapping memory */
- if (!(vsi->tx_qmap =
- (u16 *) malloc(sizeof(u16) * max_tx_queues, M_ICE, M_WAITOK))) {
- device_printf(sc->dev, "Unable to allocate Tx qmap memory\n");
- return (ENOMEM);
- }
+ vsi->tx_qmap = malloc(sizeof(u16) * max_tx_queues, M_ICE, M_WAITOK);
/* Allocate Rx queue mapping memory */
- if (!(vsi->rx_qmap =
- (u16 *) malloc(sizeof(u16) * max_rx_queues, M_ICE, M_WAITOK))) {
- device_printf(sc->dev, "Unable to allocate Rx qmap memory\n");
- goto free_tx_qmap;
- }
+ vsi->rx_qmap = malloc(sizeof(u16) * max_rx_queues, M_ICE, M_WAITOK);
/* Mark every queue map as invalid to start with */
for (i = 0; i < max_tx_queues; i++) {
@@ -459,14 +449,6 @@ ice_alloc_vsi_qmap(struct ice_vsi *vsi, const int max_tx_queues,
for (i = 0; i < max_rx_queues; i++) {
vsi->rx_qmap[i] = ICE_INVALID_RES_IDX;
}
-
- return 0;
-
-free_tx_qmap:
- free(vsi->tx_qmap, M_ICE);
- vsi->tx_qmap = NULL;
-
- return (ENOMEM);
}
/**
diff --git a/sys/dev/ice/ice_lib.h b/sys/dev/ice/ice_lib.h
index cfd848d370bb..6c010cffc0fd 100644
--- a/sys/dev/ice/ice_lib.h
+++ b/sys/dev/ice/ice_lib.h
@@ -830,7 +830,7 @@ void ice_free_bar(device_t dev, struct ice_bar_info *bar);
void ice_set_ctrlq_len(struct ice_hw *hw);
void ice_release_vsi(struct ice_vsi *vsi);
struct ice_vsi *ice_alloc_vsi(struct ice_softc *sc, enum ice_vsi_type type);
-int ice_alloc_vsi_qmap(struct ice_vsi *vsi, const int max_tx_queues,
+void ice_alloc_vsi_qmap(struct ice_vsi *vsi, const int max_tx_queues,
const int max_rx_queues);
void ice_free_vsi_qmaps(struct ice_vsi *vsi);
int ice_initialize_vsi(struct ice_vsi *vsi);
diff --git a/sys/dev/ice/if_ice_iflib.c b/sys/dev/ice/if_ice_iflib.c
index 4e451bf3fb55..3de79787f6e8 100644
--- a/sys/dev/ice/if_ice_iflib.c
+++ b/sys/dev/ice/if_ice_iflib.c
@@ -631,12 +631,8 @@ ice_if_attach_pre(if_ctx_t ctx)
*/
ice_setup_pf_vsi(sc);
- err = ice_alloc_vsi_qmap(&sc->pf_vsi, scctx->isc_ntxqsets_max,
+ ice_alloc_vsi_qmap(&sc->pf_vsi, scctx->isc_ntxqsets_max,
scctx->isc_nrxqsets_max);
- if (err) {
- device_printf(dev, "Unable to allocate VSI Queue maps\n");
- goto free_main_vsi;
- }
/* Allocate MSI-X vectors (due to isc_flags IFLIB_SKIP_MSIX) */
err = ice_allocate_msix(sc);
@@ -3518,12 +3514,7 @@ ice_setup_mirror_vsi(struct ice_mirr_if *mif)
mif->vsi = vsi;
/* Reserve VSI queue allocation from PF queues */
- ret = ice_alloc_vsi_qmap(vsi, ICE_DEFAULT_VF_QUEUES, ICE_DEFAULT_VF_QUEUES);
- if (ret) {
- device_printf(dev, "%s: Unable to allocate mirror VSI queue maps (%d queues): %s\n",
- __func__, ICE_DEFAULT_VF_QUEUES, ice_err_str(ret));
- goto release_vsi;
- }
+ ice_alloc_vsi_qmap(vsi, ICE_DEFAULT_VF_QUEUES, ICE_DEFAULT_VF_QUEUES);
vsi->num_tx_queues = vsi->num_rx_queues = ICE_DEFAULT_VF_QUEUES;
/* Assign Tx queues from PF space */
diff --git a/sys/dev/iommu/busdma_iommu.c b/sys/dev/iommu/busdma_iommu.c
index 3d554249ba3f..bddb466547d1 100644
--- a/sys/dev/iommu/busdma_iommu.c
+++ b/sys/dev/iommu/busdma_iommu.c
@@ -278,11 +278,7 @@ iommu_get_dev_ctx(device_t dev)
if (!unit->dma_enabled)
return (NULL);
-#if defined(__amd64__) || defined(__i386__)
- dmar_quirks_pre_use(unit);
- dmar_instantiate_rmrr_ctxs(unit);
-#endif
-
+ iommu_unit_pre_instantiate_ctx(unit);
return (iommu_instantiate_ctx(unit, dev, false));
}
diff --git a/sys/dev/iommu/iommu.h b/sys/dev/iommu/iommu.h
index 157f4c62423f..9845b09e8732 100644
--- a/sys/dev/iommu/iommu.h
+++ b/sys/dev/iommu/iommu.h
@@ -158,6 +158,7 @@ void iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
void iommu_domain_unload(struct iommu_domain *domain,
struct iommu_map_entries_tailq *entries, bool cansleep);
+void iommu_unit_pre_instantiate_ctx(struct iommu_unit *iommu);
struct iommu_ctx *iommu_instantiate_ctx(struct iommu_unit *iommu,
device_t dev, bool rmrr);
device_t iommu_get_requester(device_t dev, uint16_t *rid);
diff --git a/sys/dev/iser/iser_verbs.c b/sys/dev/iser/iser_verbs.c
index f5f057b961ef..f0c7e524ccf0 100644
--- a/sys/dev/iser/iser_verbs.c
+++ b/sys/dev/iser/iser_verbs.c
@@ -212,8 +212,6 @@ iser_create_device_ib_res(struct iser_device *device)
device->comps = malloc(device->comps_used * sizeof(*device->comps),
M_ISER_VERBS, M_WAITOK | M_ZERO);
- if (!device->comps)
- goto comps_err;
max_cqe = min(ISER_MAX_CQ_LEN, ib_dev->attrs.max_cqe);
@@ -280,7 +278,6 @@ iser_create_device_ib_res(struct iser_device *device)
ib_dealloc_pd(device->pd);
pd_err:
free(device->comps, M_ISER_VERBS);
-comps_err:
ISER_ERR("failed to allocate an IB resource");
return (1);
}
@@ -343,11 +340,6 @@ iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd)
int ret;
desc = malloc(sizeof(*desc), M_ISER_VERBS, M_WAITOK | M_ZERO);
- if (!desc) {
- ISER_ERR("Failed to allocate a new fastreg descriptor");
- return (NULL);
- }
-
ret = iser_alloc_reg_res(ib_device, pd, &desc->rsc);
if (ret) {
ISER_ERR("failed to allocate reg_resources");
@@ -509,9 +501,6 @@ iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
goto inc_refcnt;
device = malloc(sizeof *device, M_ISER_VERBS, M_WAITOK | M_ZERO);
- if (device == NULL)
- goto out;
-
/* assign this device to the device */
device->ib_device = cma_id->device;
/* init the device and link it into ig device list */
diff --git a/sys/dev/jme/if_jme.c b/sys/dev/jme/if_jme.c
index 96824e2d7f27..4f739ec26347 100644
--- a/sys/dev/jme/if_jme.c
+++ b/sys/dev/jme/if_jme.c
@@ -872,12 +872,6 @@ jme_attach(device_t dev)
/* Create local taskq. */
sc->jme_tq = taskqueue_create_fast("jme_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->jme_tq);
- if (sc->jme_tq == NULL) {
- device_printf(dev, "could not create taskqueue.\n");
- ether_ifdetach(ifp);
- error = ENXIO;
- goto fail;
- }
taskqueue_start_threads(&sc->jme_tq, 1, PI_NET, "%s taskq",
device_get_nameunit(sc->jme_dev));
diff --git a/sys/dev/liquidio/base/lio_request_manager.c b/sys/dev/liquidio/base/lio_request_manager.c
index f4eae0c8bf31..95eac12ecf3b 100644
--- a/sys/dev/liquidio/base/lio_request_manager.c
+++ b/sys/dev/liquidio/base/lio_request_manager.c
@@ -159,11 +159,6 @@ lio_init_instr_queue(struct octeon_device *oct, union octeon_txpciq txpciq,
db_tq = &oct->check_db_tq[iq_no];
db_tq->tq = taskqueue_create("lio_check_db_timeout", M_WAITOK,
taskqueue_thread_enqueue, &db_tq->tq);
- if (db_tq->tq == NULL) {
- lio_dev_err(oct, "check db wq create failed for iq %d\n",
- iq_no);
- return (1);
- }
TIMEOUT_TASK_INIT(db_tq->tq, &db_tq->work, 0, lio_check_db_timeout,
(void *)db_tq);
@@ -179,10 +174,6 @@ lio_init_instr_queue(struct octeon_device *oct, union octeon_txpciq txpciq,
oct->instr_queue[iq_no]->br =
buf_ring_alloc(LIO_BR_SIZE, M_DEVBUF, M_WAITOK,
&oct->instr_queue[iq_no]->enq_lock);
- if (oct->instr_queue[iq_no]->br == NULL) {
- lio_dev_err(oct, "Critical Failure setting up buf ring\n");
- return (1);
- }
return (0);
}
diff --git a/sys/dev/liquidio/base/lio_response_manager.c b/sys/dev/liquidio/base/lio_response_manager.c
index 12a3ad60521e..ac5fc6229885 100644
--- a/sys/dev/liquidio/base/lio_response_manager.c
+++ b/sys/dev/liquidio/base/lio_response_manager.c
@@ -59,10 +59,6 @@ lio_setup_response_list(struct octeon_device *oct)
ctq = &oct->dma_comp_tq;
ctq->tq = taskqueue_create("lio_dma_comp", M_WAITOK,
taskqueue_thread_enqueue, &ctq->tq);
- if (ctq->tq == NULL) {
- lio_dev_err(oct, "failed to create wq thread\n");
- return (-ENOMEM);
- }
TIMEOUT_TASK_INIT(ctq->tq, &ctq->work, 0, lio_poll_req_completion,
(void *)ctq);
diff --git a/sys/dev/liquidio/lio_ioctl.c b/sys/dev/liquidio/lio_ioctl.c
index 10c88b209051..b2fd54f59580 100644
--- a/sys/dev/liquidio/lio_ioctl.c
+++ b/sys/dev/liquidio/lio_ioctl.c
@@ -481,7 +481,7 @@ lio_get_new_flags(if_t ifp)
* Accept all multicast addresses if there are more than we
* can handle
*/
- if (if_getamcount(ifp) > LIO_MAX_MULTICAST_ADDR)
+ if (if_llmaddr_count(ifp) > LIO_MAX_MULTICAST_ADDR)
f |= LIO_IFFLAG_ALLMULTI;
}
if (if_getflags(ifp) & IFF_BROADCAST)
diff --git a/sys/dev/liquidio/lio_main.c b/sys/dev/liquidio/lio_main.c
index 7104ff07674f..3c73a6b10eed 100644
--- a/sys/dev/liquidio/lio_main.c
+++ b/sys/dev/liquidio/lio_main.c
@@ -1854,10 +1854,6 @@ lio_setup_rx_oom_poll_fn(if_t ifp)
rx_status_tq->tq = taskqueue_create("lio_rx_oom_status", M_WAITOK,
taskqueue_thread_enqueue,
&rx_status_tq->tq);
- if (rx_status_tq->tq == NULL) {
- lio_dev_err(oct, "unable to create lio rx oom status tq\n");
- return (-1);
- }
TIMEOUT_TASK_INIT(rx_status_tq->tq, &rx_status_tq->work, 0,
lio_poll_check_rx_oom_status, (void *)rx_status_tq);
diff --git a/sys/dev/liquidio/lio_sysctl.c b/sys/dev/liquidio/lio_sysctl.c
index 729f4d432274..61a7e96098c8 100644
--- a/sys/dev/liquidio/lio_sysctl.c
+++ b/sys/dev/liquidio/lio_sysctl.c
@@ -744,9 +744,6 @@ lio_get_regs(SYSCTL_HANDLER_ARGS)
regbuf = malloc(sizeof(char) * LIO_REGDUMP_LEN_XXXX, M_DEVBUF,
M_WAITOK | M_ZERO);
- if (regbuf == NULL)
- return (error);
-
switch (oct->chip_id) {
case LIO_CN23XX_PF_VID:
len += lio_cn23xx_pf_read_csr_reg(regbuf, oct);
diff --git a/sys/dev/mana/gdma_main.c b/sys/dev/mana/gdma_main.c
index 6f3e182ba1eb..13f2617ad7d4 100644
--- a/sys/dev/mana/gdma_main.c
+++ b/sys/dev/mana/gdma_main.c
@@ -868,9 +868,6 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
int err;
queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO);
- if (!queue)
- return ENOMEM;
-
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
if (err)
@@ -962,9 +959,6 @@ mana_gd_create_dma_region(struct gdma_dev *gd,
}
req = malloc(req_msg_size, M_DEVBUF, M_WAITOK | M_ZERO);
- if (!req)
- return ENOMEM;
-
mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION,
req_msg_size, sizeof(resp));
req->length = length;
@@ -1008,9 +1002,6 @@ mana_gd_create_mana_eq(struct gdma_dev *gd,
return EINVAL;
queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO);
- if (!queue)
- return ENOMEM;
-
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
if (err)
@@ -1056,9 +1047,6 @@ int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
return EINVAL;
queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO);
- if (!queue)
- return ENOMEM;
-
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
if (err)
@@ -1480,9 +1468,6 @@ mana_gd_alloc_res_map(uint32_t res_avail,
r->map =
malloc(n * sizeof(unsigned long), M_DEVBUF, M_WAITOK | M_ZERO);
- if (!r->map)
- return ENOMEM;
-
r->size = res_avail;
mtx_init(&r->lock_spin, lock_name, NULL, MTX_SPIN);
@@ -1616,10 +1601,6 @@ mana_gd_setup_irqs(device_t dev)
gc->irq_contexts = malloc(nvec * sizeof(struct gdma_irq_context),
M_DEVBUF, M_WAITOK | M_ZERO);
- if (!gc->irq_contexts) {
- rc = ENOMEM;
- goto err_setup_irq_release;
- }
for (i = 0; i < nvec; i++) {
gic = &gc->irq_contexts[i];
diff --git a/sys/dev/mana/hw_channel.c b/sys/dev/mana/hw_channel.c
index 7a40a28894fb..5904389596a3 100644
--- a/sys/dev/mana/hw_channel.c
+++ b/sys/dev/mana/hw_channel.c
@@ -416,8 +416,6 @@ mana_hwc_create_cq(struct hw_channel_context *hwc,
cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
hwc_cq = malloc(sizeof(*hwc_cq), M_DEVBUF, M_WAITOK | M_ZERO);
- if (!hwc_cq)
- return ENOMEM;
err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
if (err) {
@@ -438,10 +436,6 @@ mana_hwc_create_cq(struct hw_channel_context *hwc,
comp_buf = mallocarray(q_depth, sizeof(struct gdma_comp),
M_DEVBUF, M_WAITOK | M_ZERO);
- if (!comp_buf) {
- err = ENOMEM;
- goto out;
- }
hwc_cq->hwc = hwc;
hwc_cq->comp_buf = comp_buf;
@@ -476,8 +470,6 @@ mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, uint16_t q_depth,
dma_buf = malloc(sizeof(*dma_buf) +
q_depth * sizeof(struct hwc_work_request),
M_DEVBUF, M_WAITOK | M_ZERO);
- if (!dma_buf)
- return ENOMEM;
dma_buf->num_reqs = q_depth;
@@ -560,8 +552,6 @@ mana_hwc_create_wq(struct hw_channel_context *hwc,
queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;
hwc_wq = malloc(sizeof(*hwc_wq), M_DEVBUF, M_WAITOK | M_ZERO);
- if (!hwc_wq)
- return ENOMEM;
err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
if (err)
@@ -669,8 +659,6 @@ mana_hwc_test_channel(struct hw_channel_context *hwc, uint16_t q_depth,
ctx = malloc(q_depth * sizeof(struct hwc_caller_ctx),
M_DEVBUF, M_WAITOK | M_ZERO);
- if (!ctx)
- return ENOMEM;
for (i = 0; i < q_depth; ++i)
init_completion(&ctx[i].comp_event);
@@ -719,9 +707,6 @@ mana_hwc_establish_channel(struct gdma_context *gc, uint16_t *q_depth,
gc->cq_table = malloc(gc->max_num_cqs * sizeof(struct gdma_queue *),
M_DEVBUF, M_WAITOK | M_ZERO);
- if (!gc->cq_table)
- return ENOMEM;
-
gc->cq_table[cq->id] = cq;
return 0;
@@ -782,8 +767,6 @@ mana_hwc_create_channel(struct gdma_context *gc)
int err;
hwc = malloc(sizeof(*hwc), M_DEVBUF, M_WAITOK | M_ZERO);
- if (!hwc)
- return ENOMEM;
gd->gdma_context = gc;
gd->driver_data = hwc;
diff --git a/sys/dev/mana/mana_en.c b/sys/dev/mana/mana_en.c
index 961399172688..90db036ff59b 100644
--- a/sys/dev/mana/mana_en.c
+++ b/sys/dev/mana/mana_en.c
@@ -921,13 +921,6 @@ mana_init_port_context(struct mana_port_context *apc)
apc->rxqs = mallocarray(apc->num_queues, sizeof(struct mana_rxq *),
M_DEVBUF, M_WAITOK | M_ZERO);
- if (!apc->rxqs) {
- bus_dma_tag_destroy(apc->tx_buf_tag);
- bus_dma_tag_destroy(apc->rx_buf_tag);
- apc->rx_buf_tag = NULL;
- return ENOMEM;
- }
-
return 0;
}
@@ -1156,8 +1149,6 @@ mana_cfg_vport_steering(struct mana_port_context *apc,
req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
req = malloc(req_buf_size, M_DEVBUF, M_WAITOK | M_ZERO);
- if (!req)
- return ENOMEM;
mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
sizeof(resp));
@@ -1325,8 +1316,6 @@ mana_create_eq(struct mana_context *ac)
ac->eqs = mallocarray(gc->max_num_queues, sizeof(struct mana_eq),
M_DEVBUF, M_WAITOK | M_ZERO);
- if (!ac->eqs)
- return ENOMEM;
spec.type = GDMA_EQ;
spec.monitor_avl_buf = false;
@@ -2043,8 +2032,6 @@ mana_create_txq(struct mana_port_context *apc, if_t net)
apc->tx_qp = mallocarray(apc->num_queues, sizeof(struct mana_tx_qp),
M_DEVBUF, M_WAITOK | M_ZERO);
- if (!apc->tx_qp)
- return ENOMEM;
/* The minimum size of the WQE is 32 bytes, hence
* MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
@@ -2141,14 +2128,6 @@ mana_create_txq(struct mana_port_context *apc, if_t net)
txq->tx_buf_info = malloc(MAX_SEND_BUFFERS_PER_QUEUE *
sizeof(struct mana_send_buf_info),
M_DEVBUF, M_WAITOK | M_ZERO);
- if (unlikely(txq->tx_buf_info == NULL)) {
- if_printf(net,
- "Failed to allocate tx buf info for SQ %u\n",
- txq->gdma_sq->id);
- err = ENOMEM;
- goto out;
- }
-
snprintf(txq->txq_mtx_name, nitems(txq->txq_mtx_name),
"mana:tx(%d)", i);
@@ -2156,13 +2135,6 @@ mana_create_txq(struct mana_port_context *apc, if_t net)
txq->txq_br = buf_ring_alloc(4 * MAX_SEND_BUFFERS_PER_QUEUE,
M_DEVBUF, M_WAITOK, &txq->txq_mtx);
- if (unlikely(txq->txq_br == NULL)) {
- if_printf(net,
- "Failed to allocate buf ring for SQ %u\n",
- txq->gdma_sq->id);
- err = ENOMEM;
- goto out;
- }
/* Allocate taskqueue for deferred send */
TASK_INIT(&txq->enqueue_task, 0, mana_xmit_taskfunc, txq);
@@ -2353,9 +2325,6 @@ mana_create_rxq(struct mana_port_context *apc, uint32_t rxq_idx,
rxq = malloc(sizeof(*rxq) +
RX_BUFFERS_PER_QUEUE * sizeof(struct mana_recv_buf_oob),
M_DEVBUF, M_WAITOK | M_ZERO);
- if (!rxq)
- return NULL;
-
rxq->ndev = ndev;
rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
rxq->rxq_idx = rxq_idx;
@@ -2808,12 +2777,6 @@ mana_probe_port(struct mana_context *ac, int port_idx,
*ndev_storage = ndev;
apc = malloc(sizeof(*apc), M_DEVBUF, M_WAITOK | M_ZERO);
- if (!apc) {
- mana_err(NULL, "Failed to allocate port context\n");
- err = ENOMEM;
- goto free_net;
- }
-
apc->ac = ac;
apc->ndev = ndev;
apc->max_queues = gc->max_num_queues;
@@ -2892,7 +2855,6 @@ mana_probe_port(struct mana_context *ac, int port_idx,
reset_apc:
free(apc, M_DEVBUF);
-free_net:
*ndev_storage = NULL;
if_printf(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
if_free(ndev);
@@ -2915,9 +2877,6 @@ int mana_probe(struct gdma_dev *gd)
return err;
ac = malloc(sizeof(*ac), M_DEVBUF, M_WAITOK | M_ZERO);
- if (!ac)
- return ENOMEM;
-
ac->gdma_dev = gd;
ac->num_ports = 1;
gd->driver_data = ac;
diff --git a/sys/dev/mfi/mfi.c b/sys/dev/mfi/mfi.c
index 52e278c43fa1..3375c9c37a85 100644
--- a/sys/dev/mfi/mfi.c
+++ b/sys/dev/mfi/mfi.c
@@ -3683,11 +3683,8 @@ mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct
mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
M_WAITOK);
mtx_lock(&sc->mfi_io_lock);
- if (mfi_aen_entry != NULL) {
- mfi_aen_entry->p = curproc;
- TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
- aen_link);
- }
+ mfi_aen_entry->p = curproc;
+ TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry, aen_link);
error = mfi_aen_register(sc, l_aen.laen_seq_num,
l_aen.laen_class_locale);
diff --git a/sys/dev/mlx/mlx.c b/sys/dev/mlx/mlx.c
index 8e86a00222da..7e4cb443894a 100644
--- a/sys/dev/mlx/mlx.c
+++ b/sys/dev/mlx/mlx.c
@@ -2075,8 +2075,8 @@ mlx_user_command(struct mlx_softc *sc, struct mlx_usercommand *mu)
goto out;
}
MLX_IO_UNLOCK(sc);
- if (((kbuf = malloc(mu->mu_datasize, M_DEVBUF, M_WAITOK)) == NULL) ||
- (error = copyin(mu->mu_buf, kbuf, mu->mu_datasize))) {
+ kbuf = malloc(mu->mu_datasize, M_DEVBUF, M_WAITOK);
+ if ((error = copyin(mu->mu_buf, kbuf, mu->mu_datasize))) {
MLX_IO_LOCK(sc);
goto out;
}
diff --git a/sys/dev/mlx5/device.h b/sys/dev/mlx5/device.h
index 50995d4f70a7..67c129a0f2b3 100644
--- a/sys/dev/mlx5/device.h
+++ b/sys/dev/mlx5/device.h
@@ -1123,24 +1123,6 @@ enum mlx5_mcam_feature_groups {
#define MLX5_CAP_FLOWTABLE_RDMA_TX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_rdma.cap)
-#define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
- MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)
-
-#define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap)
-
-#define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \
- MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap)
-
-#define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap)
-
-#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \
- MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap)
-
-#define MLX5_CAP_FLOWTABLE_RDMA_TX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_rdma.cap)
-
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
diff --git a/sys/dev/mpi3mr/mpi3mr_cam.c b/sys/dev/mpi3mr/mpi3mr_cam.c
index e00d61073d96..b4999e204ab7 100644
--- a/sys/dev/mpi3mr/mpi3mr_cam.c
+++ b/sys/dev/mpi3mr/mpi3mr_cam.c
@@ -2098,12 +2098,6 @@ mpi3mr_cam_attach(struct mpi3mr_softc *sc)
mpi3mr_dprint(sc, MPI3MR_XINFO, "Starting CAM Attach\n");
cam_sc = malloc(sizeof(struct mpi3mr_cam_softc), M_MPI3MR, M_WAITOK|M_ZERO);
- if (!cam_sc) {
- mpi3mr_dprint(sc, MPI3MR_ERROR,
- "Failed to allocate memory for controller CAM instance\n");
- return (ENOMEM);
- }
-
cam_sc->maxtargets = sc->facts.max_perids + 1;
TAILQ_INIT(&cam_sc->tgt_list);
diff --git a/sys/dev/mrsas/mrsas_ioctl.c b/sys/dev/mrsas/mrsas_ioctl.c
index f07d139794af..6cefdc23d4df 100644
--- a/sys/dev/mrsas/mrsas_ioctl.c
+++ b/sys/dev/mrsas/mrsas_ioctl.c
@@ -465,13 +465,6 @@ mrsas_user_command(struct mrsas_softc *sc, struct mfi_ioc_passthru *ioc)
kern_sge[0].length = 0;
} else {
ioctl_temp_data_mem = malloc(ioc->buf_size, M_MRSAS, M_WAITOK);
- if (ioctl_temp_data_mem == NULL) {
- device_printf(sc->mrsas_dev, "Could not allocate "
- "%d memory for temporary passthrough ioctl\n",
- ioc->buf_size);
- ret = ENOMEM;
- goto out;
- }
/* Copy in data from user space */
ret = copyin(ioc->buf, ioctl_temp_data_mem, ioc->buf_size);
@@ -486,12 +479,6 @@ mrsas_user_command(struct mrsas_softc *sc, struct mfi_ioc_passthru *ioc)
*/
passcmd = malloc(sizeof(struct mrsas_passthru_cmd), M_MRSAS,
M_WAITOK);
- if (passcmd == NULL) {
- device_printf(sc->mrsas_dev, "Could not allocate "
- "memory for temporary passthrough cb struct\n");
- ret = ENOMEM;
- goto out;
- }
passcmd->complete = 0;
passcmd->sc = sc;
passcmd->cmd = cmd;
diff --git a/sys/dev/mxge/if_mxge.c b/sys/dev/mxge/if_mxge.c
index 2d7d44f48024..c974569257ab 100644
--- a/sys/dev/mxge/if_mxge.c
+++ b/sys/dev/mxge/if_mxge.c
@@ -4615,10 +4615,6 @@ mxge_attach(device_t dev)
TASK_INIT(&sc->watchdog_task, 1, mxge_watchdog_task, sc);
sc->tq = taskqueue_create("mxge_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->tq);
- if (sc->tq == NULL) {
- err = ENOMEM;
- goto abort_with_nothing;
- }
err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1, /* alignment */
@@ -4815,7 +4811,6 @@ mxge_attach(device_t dev)
taskqueue_free(sc->tq);
sc->tq = NULL;
}
-abort_with_nothing:
return err;
}
diff --git a/sys/dev/neta/if_mvneta.c b/sys/dev/neta/if_mvneta.c
index e663306509a0..bc57b10b6d8e 100644
--- a/sys/dev/neta/if_mvneta.c
+++ b/sys/dev/neta/if_mvneta.c
@@ -3000,8 +3000,6 @@ mvneta_rx_queue(struct mvneta_softc *sc, int q, int npkt)
struct mvneta_rx_desc *r;
struct mvneta_buf *rxbuf;
struct mbuf *m;
- struct lro_ctrl *lro;
- struct lro_entry *queued;
void *pktbuf;
int i, pktlen, processed, ndma;
@@ -3115,11 +3113,7 @@ mvneta_rx_queue(struct mvneta_softc *sc, int q, int npkt)
/*
* Flush any outstanding LRO work
*/
- lro = &rx->lro;
- while (__predict_false((queued = LIST_FIRST(&lro->lro_active)) != NULL)) {
- LIST_REMOVE(LIST_FIRST((&lro->lro_active)), next);
- tcp_lro_flush(lro, queued);
- }
+ tcp_lro_flush_all(&rx->lro);
}
STATIC void
diff --git a/sys/dev/nvmf/controller/nvmft_controller.c b/sys/dev/nvmf/controller/nvmft_controller.c
index dee4d8c92d3d..3c10fea75c9d 100644
--- a/sys/dev/nvmf/controller/nvmft_controller.c
+++ b/sys/dev/nvmf/controller/nvmft_controller.c
@@ -954,7 +954,7 @@ nvmft_handle_admin_command(struct nvmft_controller *ctrlr,
if (NVMEV(NVME_CC_REG_EN, ctrlr->cc) == 0 &&
cmd->opc != NVME_OPC_FABRICS_COMMANDS) {
nvmft_printf(ctrlr,
- "Unsupported admin opcode %#x whiled disabled\n", cmd->opc);
+ "Unsupported admin opcode %#x while disabled\n", cmd->opc);
nvmft_send_generic_error(ctrlr->admin, nc,
NVME_SC_COMMAND_SEQUENCE_ERROR);
nvmf_free_capsule(nc);
diff --git a/sys/dev/nvmf/nvmf_tcp.c b/sys/dev/nvmf/nvmf_tcp.c
index 67d239b63faf..22275aaa835b 100644
--- a/sys/dev/nvmf/nvmf_tcp.c
+++ b/sys/dev/nvmf/nvmf_tcp.c
@@ -1784,7 +1784,6 @@ tcp_send_controller_data(struct nvmf_capsule *nc, uint32_t data_offset,
{
struct nvmf_tcp_qpair *qp = TQP(nc->nc_qpair);
struct nvme_sgl_descriptor *sgl;
- struct mbuf *n, *p;
uint32_t data_len;
bool last_pdu, last_xfer;
@@ -1813,21 +1812,29 @@ tcp_send_controller_data(struct nvmf_capsule *nc, uint32_t data_offset,
/* Queue one more C2H_DATA PDUs containing the data from 'm'. */
while (m != NULL) {
+ struct mbuf *n;
uint32_t todo;
- todo = m->m_len;
- p = m;
- n = p->m_next;
- while (n != NULL) {
- if (todo + n->m_len > qp->max_tx_data) {
- p->m_next = NULL;
- break;
- }
- todo += n->m_len;
- p = n;
+ if (m->m_len > qp->max_tx_data) {
+ n = m_split(m, qp->max_tx_data, M_WAITOK);
+ todo = m->m_len;
+ } else {
+ struct mbuf *p;
+
+ todo = m->m_len;
+ p = m;
n = p->m_next;
+ while (n != NULL) {
+ if (todo + n->m_len > qp->max_tx_data) {
+ p->m_next = NULL;
+ break;
+ }
+ todo += n->m_len;
+ p = n;
+ n = p->m_next;
+ }
+ MPASS(m_length(m, NULL) == todo);
}
- MPASS(m_length(m, NULL) == todo);
last_pdu = (n == NULL && last_xfer);
tcp_send_c2h_pdu(qp, nc->nc_sqe.cid, data_offset, m, todo,
diff --git a/sys/dev/pms/freebsd/driver/ini/src/agtiapi.c b/sys/dev/pms/freebsd/driver/ini/src/agtiapi.c
index c8c9eb8c8dd8..cd1b80c3d712 100644
--- a/sys/dev/pms/freebsd/driver/ini/src/agtiapi.c
+++ b/sys/dev/pms/freebsd/driver/ini/src/agtiapi.c
@@ -318,13 +318,6 @@ int agtiapi_getdevlist( struct agtiapi_softc *pCard,
sizeof(void *) );
AGTIAPI_PRINTK("agtiapi_getdevlist: portCount %d\n", pCard->portCount);
devList = malloc(memNeeded1, TEMP2, M_WAITOK);
- if (devList == NULL)
- {
- AGTIAPI_PRINTK("agtiapi_getdevlist: failed to allocate memory\n");
- ret_val = IOCTL_CALL_FAIL;
- agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR;
- return ret_val;
- }
osti_memset(devList, 0, memNeeded1);
pPortalData = &pCard->pPortalData[0];
pDeviceHandleList = (bit8*)devList;
@@ -970,13 +963,8 @@ static int agtiapi_attach( device_t devx )
}
else
{
- pmsc->pPortalData = (ag_portal_data_t *)
- malloc( sizeof(ag_portal_data_t) * pmsc->portCount,
+ pmsc->pPortalData = malloc( sizeof(ag_portal_data_t) * pmsc->portCount,
M_PMC_MPRT, M_ZERO | M_WAITOK );
- if (pmsc->pPortalData == NULL)
- {
- AGTIAPI_PRINTK( "agtiapi_attach: Portal memory allocation ERROR\n" );
- }
}
pPortalData = pmsc->pPortalData;
@@ -1227,32 +1215,14 @@ STATIC agBOOLEAN agtiapi_InitCardHW( struct agtiapi_softc *pmsc )
pmsc->flags |= AGTIAPI_SYS_INTR_ON;
numVal = sizeof(ag_device_t) * pmsc->devDiscover;
- pmsc->pDevList =
- (ag_device_t *)malloc( numVal, M_PMC_MDVT, M_ZERO | M_WAITOK );
- if( !pmsc->pDevList ) {
- AGTIAPI_PRINTK( "agtiapi_InitCardHW: kmalloc %d DevList ERROR\n", numVal );
- panic( "agtiapi_InitCardHW\n" );
- return AGTIAPI_FAIL;
- }
+ pmsc->pDevList = malloc( numVal, M_PMC_MDVT, M_ZERO | M_WAITOK );
#ifdef LINUX_PERBI_SUPPORT
numVal = sizeof(ag_slr_map_t) * pmsc->devDiscover;
- pmsc->pSLRList =
- (ag_slr_map_t *)malloc( numVal, M_PMC_MSLR, M_ZERO | M_WAITOK );
- if( !pmsc->pSLRList ) {
- AGTIAPI_PRINTK( "agtiapi_InitCardHW: kmalloc %d SLRList ERROR\n", numVal );
- panic( "agtiapi_InitCardHW SLRL\n" );
- return AGTIAPI_FAIL;
- }
+ pmsc->pSLRList = malloc( numVal, M_PMC_MSLR, M_ZERO | M_WAITOK );
numVal = sizeof(ag_tgt_map_t) * pmsc->devDiscover;
- pmsc->pWWNList =
- (ag_tgt_map_t *)malloc( numVal, M_PMC_MTGT, M_ZERO | M_WAITOK );
- if( !pmsc->pWWNList ) {
- AGTIAPI_PRINTK( "agtiapi_InitCardHW: kmalloc %d WWNList ERROR\n", numVal );
- panic( "agtiapi_InitCardHW WWNL\n" );
- return AGTIAPI_FAIL;
- }
+ pmsc->pWWNList = malloc( numVal, M_PMC_MTGT, M_ZERO | M_WAITOK );
// Get the WWN_to_target_ID mappings from the
// holding area which contains the input of the
diff --git a/sys/dev/qat/qat_common/adf_freebsd_dev_processes.c b/sys/dev/qat/qat_common/adf_freebsd_dev_processes.c
index b8a17344bdea..a70f25d57dcb 100644
--- a/sys/dev/qat/qat_common/adf_freebsd_dev_processes.c
+++ b/sys/dev/qat/qat_common/adf_freebsd_dev_processes.c
@@ -146,8 +146,6 @@ adf_processes_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
return ENXIO;
}
prv_data = malloc(sizeof(*prv_data), M_QAT, M_WAITOK | M_ZERO);
- if (!prv_data)
- return ENOMEM;
INIT_LIST_HEAD(&prv_data->list);
error = devfs_set_cdevpriv(prv_data, adf_processes_release);
if (error) {
@@ -573,14 +571,8 @@ adf_state_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
int ret = 0;
prv_data = malloc(sizeof(*prv_data), M_QAT, M_WAITOK | M_ZERO);
- if (!prv_data)
- return -ENOMEM;
entry_proc_events =
malloc(sizeof(struct entry_proc_events), M_QAT, M_WAITOK | M_ZERO);
- if (!entry_proc_events) {
- free(prv_data, M_QAT);
- return -ENOMEM;
- }
mtx_lock(&mtx);
prv_data->cdev = dev;
prv_data->cdev->si_drv1 = prv_data;
diff --git a/sys/dev/qat/qat_common/adf_freebsd_uio.c b/sys/dev/qat/qat_common/adf_freebsd_uio.c
index c109fc79b0f4..64efde72b4b8 100644
--- a/sys/dev/qat/qat_common/adf_freebsd_uio.c
+++ b/sys/dev/qat/qat_common/adf_freebsd_uio.c
@@ -199,10 +199,6 @@ adf_alloc_bundle(struct adf_accel_dev *accel_dev, int bundle_nr)
accel = accel_dev->accel;
handle = malloc(sizeof(*handle), M_QAT, M_WAITOK | M_ZERO);
- if (!handle) {
- printf("ERROR in adf_alloc_bundle %d\n", __LINE__);
- return ENOMEM;
- }
handle->accel = accel;
handle->bundle = bundle_nr;
@@ -294,10 +290,6 @@ adf_uio_mmap_single(struct cdev *dev,
/* Adding pid to bundle list */
instance_rings =
malloc(sizeof(*instance_rings), M_QAT, M_WAITOK | M_ZERO);
- if (!instance_rings) {
- printf("QAT: Memory allocation error - line: %d\n", __LINE__);
- return -ENOMEM;
- }
instance_rings->user_pid = curproc->p_pid;
instance_rings->ring_mask = 0;
mutex_lock(&bundle->list_lock);
diff --git a/sys/dev/qat/qat_common/adf_freebsd_uio_cleanup.c b/sys/dev/qat/qat_common/adf_freebsd_uio_cleanup.c
index 6fb4cf0bf2f7..954e31c683ce 100644
--- a/sys/dev/qat/qat_common/adf_freebsd_uio_cleanup.c
+++ b/sys/dev/qat/qat_common/adf_freebsd_uio_cleanup.c
@@ -123,9 +123,6 @@ get_orphan_bundle(int bank,
orphan_bundle =
malloc(sizeof(*orphan_bundle), M_QAT, M_WAITOK | M_ZERO);
- if (!orphan_bundle)
- return ENOMEM;
-
csr_base = accel->bar->virt_addr;
orphan_bundle->csr_base = csr_base;
orphan_bundle->bank = bank;
diff --git a/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c b/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c
index 05a99ae43ab7..9b66ae4b2370 100644
--- a/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c
+++ b/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c
@@ -117,11 +117,6 @@ adf_attach(device_t dev)
}
/* Allocate and configure device configuration structure */
hw_data = malloc(sizeof(*hw_data), M_QAT_4XXXVF, M_WAITOK | M_ZERO);
- if (!hw_data) {
- ret = -ENOMEM;
- goto out_err;
- }
-
accel_dev->hw_device = hw_data;
adf_init_hw_data_4xxxiov(accel_dev->hw_device);
accel_pci_dev->revid = pci_get_revid(dev);
diff --git a/sys/dev/sdhci/sdhci.c b/sys/dev/sdhci/sdhci.c
index 2403f60d613c..c747f54cb32c 100644
--- a/sys/dev/sdhci/sdhci.c
+++ b/sys/dev/sdhci/sdhci.c
@@ -72,10 +72,10 @@ static int sdhci_debug = 0;
SYSCTL_INT(_hw_sdhci, OID_AUTO, debug, CTLFLAG_RWTUN, &sdhci_debug, 0,
"Debug level");
u_int sdhci_quirk_clear = 0;
-SYSCTL_INT(_hw_sdhci, OID_AUTO, quirk_clear, CTLFLAG_RWTUN, &sdhci_quirk_clear,
+SYSCTL_UINT(_hw_sdhci, OID_AUTO, quirk_clear, CTLFLAG_RWTUN, &sdhci_quirk_clear,
0, "Mask of quirks to clear");
u_int sdhci_quirk_set = 0;
-SYSCTL_INT(_hw_sdhci, OID_AUTO, quirk_set, CTLFLAG_RWTUN, &sdhci_quirk_set, 0,
+SYSCTL_UINT(_hw_sdhci, OID_AUTO, quirk_set, CTLFLAG_RWTUN, &sdhci_quirk_set, 0,
"Mask of quirks to set");
#define RD1(slot, off) SDHCI_READ_1((slot)->bus, (slot), (off))
diff --git a/sys/dev/sdhci/sdhci_xenon_acpi.c b/sys/dev/sdhci/sdhci_xenon_acpi.c
index 01b6c14dc5f2..3e8b2c4a349c 100644
--- a/sys/dev/sdhci/sdhci_xenon_acpi.c
+++ b/sys/dev/sdhci/sdhci_xenon_acpi.c
@@ -86,8 +86,6 @@ sdhci_xenon_acpi_attach(device_t dev)
memset(&mmc_helper, 0, sizeof(mmc_helper));
slot = malloc(sizeof(*slot), M_DEVBUF, M_ZERO | M_WAITOK);
- if (!slot)
- return (ENOMEM);
/*
* Don't use regularators.
diff --git a/sys/dev/sdio/sdiodevs b/sys/dev/sdio/sdiodevs
index 194ef8e5d901..8c341e77d9f9 100644
--- a/sys/dev/sdio/sdiodevs
+++ b/sys/dev/sdio/sdiodevs
@@ -42,8 +42,11 @@
* List of TPLMID_MANF "vendor ID"s.
* Please sort by vendor ID ascending.
*/
+vendor REALTEK 0x024c Realtek
+vendor ATHEROS 0x0271 Atheros
vendor BROADCOM 0x02d0 Broadcom
vendor CYPRESS 0x02d0 Cypress/Broadcom
+vendor MEDIATEK 0x037a MediaTek
/*
* --------------------------------------------------------------------------
@@ -51,6 +54,21 @@ vendor CYPRESS 0x02d0 Cypress/Broadcom
* Please group by vendor in same order as above.
*/
+/* Realtek products */
+/* PR 251063 */
+product REALTEK RTW8723BS 0xb723 802.11bgn SDIO WLAN with Bluetooth 4.0 Single-Chip Controller
+/* rtw88 */
+product REALTEK RTW8821BS 0xb821
+product REALTEK RTW8822BS 0xb822 802.11ac/abgn SDIO WLAN with Bluetooth 4.1 Single-Chip Controller
+product REALTEK RTW8821CS 0xc821 802.11ac/abgn SDIO WLAN with Bluetooth 4.2 Single-Chip Controller
+product REALTEK RTW8822CS 0xc822 802.11ac/abgn SDIO WLAN with Bluetooth x.x Single-Chip Controller
+product REALTEK RTW8723DS_1ANT 0xd724 802.11bgn SDIO WLAN with Bluetooth 4.2 Single-Chip Controller
+product REALTEK RTW8723DS_2ANT 0xd723 802.11bgn SDIO WLAN with Bluetooth 4.2 Single-Chip Controller
+
+/* Atheros/QCA products */
+product ATHEROS AR6005 0x050a Qualcomm Atheros 802.11ac WLAN SDIO
+product ATHEROS QCA9377 0x0701 Qualcomm Atheros 802.11ac WLAN SDIO
+
/* Broadcom products */
product BROADCOM 43241 0x4324 BCM43241 fullmac SDIO WiFi
product BROADCOM 4329 0x4329 BCM4329 fullmac SDIO WiFi
@@ -61,13 +79,25 @@ product BROADCOM 4339 0x4339 BCM4339 fullmac SDIO WiFi
product BROADCOM 4345 0x4345 BCM4345 fullmac SDIO WiFi
product BROADCOM 4354 0x4354 BCM4354 fullmac SDIO WiFi
product BROADCOM 4356 0x4356 BCM4356 fullmac SDIO WiFi
+product BROADCOM 4359 0x4359 BCM4359 fullmac SDIO WiFi
product BROADCOM 43143 0xa887 BCM43143 fullmac SDIO WiFi
product BROADCOM 43340 0xa94c BCM43340 fullmac SDIO WiFi
product BROADCOM 43341 0xa94d BCM43341 fullmac SDIO WiFi
product BROADCOM 43362 0xa962 BCM43362 fullmac SDIO WiFi
product BROADCOM 43364 0xa9a4 BCM43364 fullmac SDIO WiFi
product BROADCOM 43430 0xa9a6 BCM43430 fullmac SDIO WiFi
+product BROADCOM 43439 0xa9af BCM43439 fullmac SDIO WiFi
product BROADCOM 43455 0xa9bf BCM43455 fullmac SDIO WiFi
-product CYPRESS 4373 0x4373 CY4373 fullmac SDIO WiFi
+product BROADCOM CYPRESS_4373 0x4373 BCMCY4373 fullmac SDIO WiFi
+product BROADCOM CYPRESS_43012 0xa804 BCMCY43012 fullmac SDIO WiFi
+product BROADCOM CYPRESS_43752 0xaae8 BCMCY43752 fullmac SDIO WiFi
+product BROADCOM CYPRESS_89359 0x4355 BCMCY89359 fullmac SDIO WiFi
+
+product CYPRESS 43439 0xbd3d CY43439 fullmac SDIO WiFi
+palias BROADCOM_CYPRESS_43439 CYPRESS_43439
+
+/* MediaTek products */
+product MEDIATEK MT7663S 0x7603 MediaTek MT7663S SDIO WiFi
+product MEDIATEK MT7921S 0x7901 MediaTek MT7921S SDIO WiFi
/* end */
diff --git a/sys/dev/sound/macio/i2s.c b/sys/dev/sound/macio/i2s.c
index 5f8cb3aa15f7..647d66c27bba 100644
--- a/sys/dev/sound/macio/i2s.c
+++ b/sys/dev/sound/macio/i2s.c
@@ -241,10 +241,8 @@ i2s_attach(device_t self)
* Register a hook for delayed attach in order to allow
* the I2C controller to attach.
*/
- if ((i2s_delayed_attach = malloc(sizeof(struct intr_config_hook),
- M_TEMP, M_WAITOK | M_ZERO)) == NULL)
- return (ENOMEM);
-
+ i2s_delayed_attach = malloc(sizeof(struct intr_config_hook),
+ M_TEMP, M_WAITOK | M_ZERO);
i2s_delayed_attach->ich_func = i2s_postattach;
i2s_delayed_attach->ich_arg = sc;
diff --git a/sys/dev/sound/usb/uaudio.c b/sys/dev/sound/usb/uaudio.c
index 90b1ec3bad04..1ed259b8e617 100644
--- a/sys/dev/sound/usb/uaudio.c
+++ b/sys/dev/sound/usb/uaudio.c
@@ -2687,8 +2687,6 @@ uaudio_chan_init(struct uaudio_chan *ch, struct snd_dbuf *b,
DPRINTF("Worst case buffer is %d bytes\n", (int)buf_size);
ch->buf = malloc(buf_size, M_DEVBUF, M_WAITOK | M_ZERO);
- if (ch->buf == NULL)
- goto error;
if (sndbuf_setup(b, ch->buf, buf_size) != 0)
goto error;
@@ -3256,31 +3254,27 @@ uaudio_mixer_add_ctl_sub(struct uaudio_softc *sc, struct uaudio_mixer_node *mc)
malloc(sizeof(*p_mc_new), M_USBDEV, M_WAITOK);
int ch;
- if (p_mc_new != NULL) {
- memcpy(p_mc_new, mc, sizeof(*p_mc_new));
- p_mc_new->next = sc->sc_mixer_root;
- sc->sc_mixer_root = p_mc_new;
- sc->sc_mixer_count++;
+ memcpy(p_mc_new, mc, sizeof(*p_mc_new));
+ p_mc_new->next = sc->sc_mixer_root;
+ sc->sc_mixer_root = p_mc_new;
+ sc->sc_mixer_count++;
- /* set default value for all channels */
- for (ch = 0; ch < p_mc_new->nchan; ch++) {
- switch (p_mc_new->val_default) {
- case 1:
- /* 50% */
- p_mc_new->wData[ch] = (p_mc_new->maxval + p_mc_new->minval) / 2;
- break;
- case 2:
- /* 100% */
- p_mc_new->wData[ch] = p_mc_new->maxval;
- break;
- default:
- /* 0% */
- p_mc_new->wData[ch] = p_mc_new->minval;
- break;
- }
+ /* set default value for all channels */
+ for (ch = 0; ch < p_mc_new->nchan; ch++) {
+ switch (p_mc_new->val_default) {
+ case 1:
+ /* 50% */
+ p_mc_new->wData[ch] = (p_mc_new->maxval + p_mc_new->minval) / 2;
+ break;
+ case 2:
+ /* 100% */
+ p_mc_new->wData[ch] = p_mc_new->maxval;
+ break;
+ default:
+ /* 0% */
+ p_mc_new->wData[ch] = p_mc_new->minval;
+ break;
}
- } else {
- DPRINTF("out of memory\n");
}
}
diff --git a/sys/dev/sume/if_sume.c b/sys/dev/sume/if_sume.c
index 9d2d5b907e02..94789a4aab34 100644
--- a/sys/dev/sume/if_sume.c
+++ b/sys/dev/sume/if_sume.c
@@ -1196,16 +1196,11 @@ sume_probe_riffa_buffer(const struct sume_adapter *adapter,
{
struct riffa_chnl_dir **rp;
bus_addr_t hw_addr;
- int error, ch;
+ int ch;
device_t dev = adapter->dev;
- error = ENOMEM;
*p = malloc(SUME_RIFFA_CHANNELS * sizeof(struct riffa_chnl_dir *),
M_SUME, M_ZERO | M_WAITOK);
- if (*p == NULL) {
- device_printf(dev, "malloc(%s) failed.\n", dir);
- return (error);
- }
rp = *p;
/* Allocate the chnl_dir structs themselves. */
@@ -1213,11 +1208,6 @@ sume_probe_riffa_buffer(const struct sume_adapter *adapter,
/* One direction. */
rp[ch] = malloc(sizeof(struct riffa_chnl_dir), M_SUME,
M_ZERO | M_WAITOK);
- if (rp[ch] == NULL) {
- device_printf(dev, "malloc(%s[%d]) riffa_chnl_dir "
- "failed.\n", dir, ch);
- return (error);
- }
int err = bus_dma_tag_create(bus_get_dma_tag(dev),
4, 0,
diff --git a/sys/dev/tpm/tpm20.c b/sys/dev/tpm/tpm20.c
index 80f7d9e105a6..876dd0bcc40d 100644
--- a/sys/dev/tpm/tpm20.c
+++ b/sys/dev/tpm/tpm20.c
@@ -206,6 +206,7 @@ tpm20_init(struct tpm_sc *sc)
tpm20_release(sc);
#ifdef TPM_HARVEST
+ random_harvest_register_source(RANDOM_PURE_TPM);
TIMEOUT_TASK_INIT(taskqueue_thread, &sc->harvest_task, 0,
tpm20_harvest, sc);
taskqueue_enqueue_timeout(taskqueue_thread, &sc->harvest_task, 0);
@@ -222,6 +223,7 @@ tpm20_release(struct tpm_sc *sc)
#ifdef TPM_HARVEST
if (device_is_attached(sc->dev))
taskqueue_drain_timeout(taskqueue_thread, &sc->harvest_task);
+ random_harvest_deregister_source(RANDOM_PURE_TPM);
#endif
if (sc->buf != NULL)
diff --git a/sys/dev/usb/input/wsp.c b/sys/dev/usb/input/wsp.c
index f1931c9e03c0..a8d6c14c7421 100644
--- a/sys/dev/usb/input/wsp.c
+++ b/sys/dev/usb/input/wsp.c
@@ -98,7 +98,10 @@ static struct wsp_tuning {
int pressure_untouch_threshold;
int pressure_tap_threshold;
int scr_hor_threshold;
+ int max_finger_area;
+ int max_double_tap_distance;
int enable_single_tap_clicks;
+ int enable_single_tap_movement;
}
wsp_tuning =
{
@@ -109,7 +112,10 @@ static struct wsp_tuning {
.pressure_untouch_threshold = 10,
.pressure_tap_threshold = 120,
.scr_hor_threshold = 20,
+ .max_finger_area = 1900,
+ .max_double_tap_distance = 2500,
.enable_single_tap_clicks = 1,
+ .enable_single_tap_movement = 1,
};
static void
@@ -121,8 +127,11 @@ wsp_runing_rangecheck(struct wsp_tuning *ptun)
WSP_CLAMP(ptun->pressure_touch_threshold, 1, 255);
WSP_CLAMP(ptun->pressure_untouch_threshold, 1, 255);
WSP_CLAMP(ptun->pressure_tap_threshold, 1, 255);
+ WSP_CLAMP(ptun->max_finger_area, 1, 2400);
+ WSP_CLAMP(ptun->max_double_tap_distance, 1, 16384);
WSP_CLAMP(ptun->scr_hor_threshold, 1, 255);
WSP_CLAMP(ptun->enable_single_tap_clicks, 0, 1);
+ WSP_CLAMP(ptun->enable_single_tap_movement, 0, 1);
}
SYSCTL_INT(_hw_usb_wsp, OID_AUTO, scale_factor, CTLFLAG_RWTUN,
@@ -137,10 +146,17 @@ SYSCTL_INT(_hw_usb_wsp, OID_AUTO, pressure_untouch_threshold, CTLFLAG_RWTUN,
&wsp_tuning.pressure_untouch_threshold, 0, "untouch pressure threshold");
SYSCTL_INT(_hw_usb_wsp, OID_AUTO, pressure_tap_threshold, CTLFLAG_RWTUN,
&wsp_tuning.pressure_tap_threshold, 0, "tap pressure threshold");
+SYSCTL_INT(_hw_usb_wsp, OID_AUTO, max_finger_area, CTLFLAG_RWTUN,
+ &wsp_tuning.max_finger_area, 0, "maximum finger area");
+SYSCTL_INT(_hw_usb_wsp, OID_AUTO, max_double_tap_distance, CTLFLAG_RWTUN,
+ &wsp_tuning.max_double_tap_distance, 0, "maximum double-finger click distance");
SYSCTL_INT(_hw_usb_wsp, OID_AUTO, scr_hor_threshold, CTLFLAG_RWTUN,
&wsp_tuning.scr_hor_threshold, 0, "horizontal scrolling threshold");
SYSCTL_INT(_hw_usb_wsp, OID_AUTO, enable_single_tap_clicks, CTLFLAG_RWTUN,
&wsp_tuning.enable_single_tap_clicks, 0, "enable single tap clicks");
+SYSCTL_INT(_hw_usb_wsp, OID_AUTO, enable_single_tap_movement, CTLFLAG_RWTUN,
+ &wsp_tuning.enable_single_tap_movement, 0, "enable single tap movement");
+
/*
* Some tables, structures, definitions and constant values for the
@@ -567,13 +583,13 @@ struct wsp_softc {
struct tp_finger *index[MAX_FINGERS]; /* finger index data */
int16_t pos_x[MAX_FINGERS]; /* position array */
int16_t pos_y[MAX_FINGERS]; /* position array */
+ int16_t pre_pos_x[MAX_FINGERS]; /* previous position array */
+ int16_t pre_pos_y[MAX_FINGERS]; /* previous position array */
u_int sc_touch; /* touch status */
#define WSP_UNTOUCH 0x00
#define WSP_FIRST_TOUCH 0x01
#define WSP_SECOND_TOUCH 0x02
#define WSP_TOUCHING 0x04
- int16_t pre_pos_x; /* previous position array */
- int16_t pre_pos_y; /* previous position array */
int dx_sum; /* x axis cumulative movement */
int dy_sum; /* y axis cumulative movement */
int dz_sum; /* z axis cumulative movement */
@@ -590,7 +606,6 @@ struct wsp_softc {
#define WSP_TAP_THRESHOLD 3
#define WSP_TAP_MAX_COUNT 20
int distance; /* the distance of 2 fingers */
-#define MAX_DISTANCE 2500 /* the max allowed distance */
uint8_t ibtn; /* button status in tapping */
uint8_t ntaps; /* finger status in tapping */
uint8_t scr_mode; /* scroll status in movement */
@@ -1034,13 +1049,35 @@ wsp_intr_callback(struct usb_xfer *xfer, usb_error_t error)
sc->sc_status.obutton = sc->sc_status.button;
sc->sc_status.button = 0;
+ if (ntouch == 2) {
+ sc->distance = max(sc->distance, max(
+ abs(sc->pos_x[0] - sc->pos_x[1]),
+ abs(sc->pos_y[0] - sc->pos_y[1])));
+ }
+
if (ibt != 0) {
- if ((params->tp->caps & HAS_INTEGRATED_BUTTON) && ntouch == 2)
- sc->sc_status.button |= MOUSE_BUTTON3DOWN;
- else if ((params->tp->caps & HAS_INTEGRATED_BUTTON) && ntouch == 3)
- sc->sc_status.button |= MOUSE_BUTTON2DOWN;
- else
+ if (params->tp->caps & HAS_INTEGRATED_BUTTON) {
+ switch (ntouch) {
+ case 1:
+ sc->sc_status.button |= MOUSE_BUTTON1DOWN;
+ break;
+ case 2:
+ if (sc->distance < tun.max_double_tap_distance && abs(sc->dx_sum) < 5 &&
+ abs(sc->dy_sum) < 5)
+ sc->sc_status.button |= MOUSE_BUTTON3DOWN;
+ else
+ sc->sc_status.button |= MOUSE_BUTTON1DOWN;
+ break;
+ case 3:
+ sc->sc_status.button |= MOUSE_BUTTON2DOWN;
+ break;
+ default:
+ break;
+ }
+ } else {
sc->sc_status.button |= MOUSE_BUTTON1DOWN;
+ }
+
sc->ibtn = 1;
}
sc->intr_count++;
@@ -1049,7 +1086,7 @@ wsp_intr_callback(struct usb_xfer *xfer, usb_error_t error)
switch (ntouch) {
case 1:
if (sc->index[0]->touch_major > tun.pressure_tap_threshold &&
- sc->index[0]->tool_major <= 1200)
+ sc->index[0]->tool_major <= tun.max_finger_area)
sc->ntaps = 1;
break;
case 2:
@@ -1067,11 +1104,7 @@ wsp_intr_callback(struct usb_xfer *xfer, usb_error_t error)
break;
}
}
- if (ntouch == 2) {
- sc->distance = max(sc->distance, max(
- abs(sc->pos_x[0] - sc->pos_x[1]),
- abs(sc->pos_y[0] - sc->pos_y[1])));
- }
+
if (sc->index[0]->touch_major < tun.pressure_untouch_threshold &&
sc->sc_status.button == 0) {
sc->sc_touch = WSP_UNTOUCH;
@@ -1092,7 +1125,7 @@ wsp_intr_callback(struct usb_xfer *xfer, usb_error_t error)
case 2:
DPRINTFN(WSP_LLEVEL_INFO, "sum_x=%5d, sum_y=%5d\n",
sc->dx_sum, sc->dy_sum);
- if (sc->distance < MAX_DISTANCE && abs(sc->dx_sum) < 5 &&
+ if (sc->distance < tun.max_double_tap_distance && abs(sc->dx_sum) < 5 &&
abs(sc->dy_sum) < 5) {
wsp_add_to_queue(sc, 0, 0, 0, MOUSE_BUTTON3DOWN);
DPRINTFN(WSP_LLEVEL_INFO, "RIGHT CLICK!\n");
@@ -1138,27 +1171,27 @@ wsp_intr_callback(struct usb_xfer *xfer, usb_error_t error)
} else if (sc->index[0]->touch_major >= tun.pressure_touch_threshold &&
sc->sc_touch == WSP_FIRST_TOUCH) { /* ignore second touch */
sc->sc_touch = WSP_SECOND_TOUCH;
- DPRINTFN(WSP_LLEVEL_INFO, "Fist pre_x=%5d, pre_y=%5d\n",
- sc->pre_pos_x, sc->pre_pos_y);
+ DPRINTFN(WSP_LLEVEL_INFO, "First pre_x[0]=%5d, pre_y[0]=%5d\n",
+ sc->pre_pos_x[0], sc->pre_pos_y[0]);
} else {
if (sc->sc_touch == WSP_SECOND_TOUCH)
sc->sc_touch = WSP_TOUCHING;
if (ntouch != 0 &&
sc->index[0]->touch_major >= tun.pressure_touch_threshold) {
- dx = sc->pos_x[0] - sc->pre_pos_x;
- dy = sc->pos_y[0] - sc->pre_pos_y;
+ dx = sc->pos_x[0] - sc->pre_pos_x[0];
+ dy = sc->pos_y[0] - sc->pre_pos_y[0];
- /* Ignore movement during button is releasing */
- if (sc->ibtn != 0 && sc->sc_status.button == 0)
+ /* Optionally ignore movement while the button is being released */
+ if (tun.enable_single_tap_movement != 1 && sc->ibtn != 0 && sc->sc_status.button == 0)
dx = dy = 0;
/* Ignore movement if ntouch changed */
if (sc->o_ntouch != ntouch)
dx = dy = 0;
- /* Ignore unexpeted movement when typing */
- if (ntouch == 1 && sc->index[0]->tool_major > 1200)
+ /* Ignore unexpected movement when typing (palm detection) */
+ if (ntouch == 1 && sc->index[0]->tool_major > tun.max_finger_area)
dx = dy = 0;
if (sc->ibtn != 0 && ntouch == 1 &&
@@ -1167,8 +1200,8 @@ wsp_intr_callback(struct usb_xfer *xfer, usb_error_t error)
dx = dy = 0;
if (ntouch == 2 && sc->sc_status.button != 0) {
- dx = sc->pos_x[sc->finger] - sc->pre_pos_x;
- dy = sc->pos_y[sc->finger] - sc->pre_pos_y;
+ dx = sc->pos_x[sc->finger] - sc->pre_pos_x[sc->finger];
+ dy = sc->pos_y[sc->finger] - sc->pre_pos_y[sc->finger];
/*
* Ignore movement of switch finger or
@@ -1230,9 +1263,7 @@ wsp_intr_callback(struct usb_xfer *xfer, usb_error_t error)
dx = dy = 0;
if (sc->dz_count == 0)
dz = (sc->dz_sum / tun.z_factor) * (tun.z_invert ? -1 : 1);
- if (sc->scr_mode == WSP_SCR_HOR ||
- abs(sc->pos_x[0] - sc->pos_x[1]) > MAX_DISTANCE ||
- abs(sc->pos_y[0] - sc->pos_y[1]) > MAX_DISTANCE)
+ if (sc->scr_mode == WSP_SCR_HOR || sc->distance > tun.max_double_tap_distance)
dz = 0;
}
if (ntouch == 3)
@@ -1256,12 +1287,12 @@ wsp_intr_callback(struct usb_xfer *xfer, usb_error_t error)
sc->rdz = 0;
}
}
- sc->pre_pos_x = sc->pos_x[0];
- sc->pre_pos_y = sc->pos_y[0];
+ sc->pre_pos_x[0] = sc->pos_x[0];
+ sc->pre_pos_y[0] = sc->pos_y[0];
if (ntouch == 2 && sc->sc_status.button != 0) {
- sc->pre_pos_x = sc->pos_x[sc->finger];
- sc->pre_pos_y = sc->pos_y[sc->finger];
+ sc->pre_pos_x[sc->finger] = sc->pos_x[sc->finger];
+ sc->pre_pos_y[sc->finger] = sc->pos_y[sc->finger];
}
sc->o_ntouch = ntouch;
diff --git a/sys/dev/usb/usb.h b/sys/dev/usb/usb.h
index 27da96e4446a..81ffa18102e5 100644
--- a/sys/dev/usb/usb.h
+++ b/sys/dev/usb/usb.h
@@ -114,7 +114,7 @@ MALLOC_DECLARE(M_USBDEV);
/* Allow for marginal and non-conforming devices. */
#define USB_PORT_RESET_DELAY 50 /* ms */
#define USB_PORT_ROOT_RESET_DELAY 200 /* ms */
-#define USB_PORT_RESET_RECOVERY 10 /* ms */
+#define USB_PORT_RESET_RECOVERY 20 /* ms */
#define USB_PORT_POWERUP_DELAY 300 /* ms */
#define USB_PORT_RESUME_DELAY (20*2) /* ms */
#define USB_SET_ADDRESS_SETTLE 10 /* ms */
diff --git a/sys/dev/vmm/vmm_dev.c b/sys/dev/vmm/vmm_dev.c
index dde3143b5741..56e6d11e017c 100644
--- a/sys/dev/vmm/vmm_dev.c
+++ b/sys/dev/vmm/vmm_dev.c
@@ -13,9 +13,9 @@
#include
#include
#include
-#include
#include
#include
+#include
#include
#include
#include
@@ -58,13 +58,12 @@ struct vmmdev_softc {
SLIST_HEAD(, devmem_softc) devmem;
int flags;
};
-#define VSC_LINKED 0x01
static SLIST_HEAD(, vmmdev_softc) head;
static unsigned pr_allow_flag;
-static struct mtx vmmdev_mtx;
-MTX_SYSINIT(vmmdev_mtx, &vmmdev_mtx, "vmm device mutex", MTX_DEF);
+static struct sx vmmdev_mtx;
+SX_SYSINIT(vmmdev_mtx, &vmmdev_mtx, "vmm device mutex");
static MALLOC_DEFINE(M_VMMDEV, "vmmdev", "vmmdev");
@@ -156,7 +155,7 @@ vmmdev_lookup(const char *name, struct ucred *cred)
{
struct vmmdev_softc *sc;
- mtx_assert(&vmmdev_mtx, MA_OWNED);
+ sx_assert(&vmmdev_mtx, SA_XLOCKED);
SLIST_FOREACH(sc, &head, link) {
if (strcmp(name, vm_name(sc->vm)) == 0)
@@ -186,10 +185,6 @@ vmmdev_rw(struct cdev *cdev, struct uio *uio, int flags)
void *hpa, *cookie;
struct vmmdev_softc *sc;
- error = vmm_priv_check(curthread->td_ucred);
- if (error)
- return (error);
-
sc = vmmdev_lookup2(cdev);
if (sc == NULL)
return (ENXIO);
@@ -327,6 +322,22 @@ vm_set_register_set(struct vcpu *vcpu, unsigned int count, int *regnum,
return (error);
}
+static int
+vmmdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
+{
+ int error;
+
+ /*
+ * A jail without vmm access shouldn't be able to access vmm device
+ * files at all, but check here just to be thorough.
+ */
+ error = vmm_priv_check(td->td_ucred);
+ if (error != 0)
+ return (error);
+
+ return (0);
+}
+
static const struct vmmdev_ioctl vmmdev_ioctls[] = {
VMMDEV_IOCTL(VM_GET_REGISTER, VMMDEV_IOCTL_LOCK_ONE_VCPU),
VMMDEV_IOCTL(VM_SET_REGISTER, VMMDEV_IOCTL_LOCK_ONE_VCPU),
@@ -375,10 +386,6 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
const struct vmmdev_ioctl *ioctl;
int error, vcpuid;
- error = vmm_priv_check(td->td_ucred);
- if (error)
- return (error);
-
sc = vmmdev_lookup2(cdev);
if (sc == NULL)
return (ENXIO);
@@ -681,10 +688,6 @@ vmmdev_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t mapsize,
int error, found, segid;
bool sysmem;
- error = vmm_priv_check(curthread->td_ucred);
- if (error)
- return (error);
-
first = *offset;
last = first + mapsize;
if ((nprot & PROT_EXEC) || first < 0 || first >= last)
@@ -736,6 +739,8 @@ vmmdev_destroy(struct vmmdev_softc *sc)
struct devmem_softc *dsc;
int error __diagused;
+ KASSERT(sc->cdev == NULL, ("%s: cdev not free", __func__));
+
/*
* Destroy all cdevs:
*
@@ -745,7 +750,6 @@ vmmdev_destroy(struct vmmdev_softc *sc)
*/
SLIST_FOREACH(dsc, &sc->devmem, link) {
KASSERT(dsc->cdev != NULL, ("devmem cdev already destroyed"));
- destroy_dev(dsc->cdev);
devmem_destroy(dsc);
}
@@ -761,21 +765,15 @@ vmmdev_destroy(struct vmmdev_softc *sc)
free(dsc, M_VMMDEV);
}
- if (sc->cdev != NULL)
- destroy_dev(sc->cdev);
-
if (sc->vm != NULL)
vm_destroy(sc->vm);
if (sc->ucred != NULL)
crfree(sc->ucred);
- if ((sc->flags & VSC_LINKED) != 0) {
- mtx_lock(&vmmdev_mtx);
- SLIST_REMOVE(&head, sc, vmmdev_softc, link);
- mtx_unlock(&vmmdev_mtx);
- }
-
+ sx_xlock(&vmmdev_mtx);
+ SLIST_REMOVE(&head, sc, vmmdev_softc, link);
+ sx_xunlock(&vmmdev_mtx);
free(sc, M_VMMDEV);
}
@@ -785,10 +783,10 @@ vmmdev_lookup_and_destroy(const char *name, struct ucred *cred)
struct cdev *cdev;
struct vmmdev_softc *sc;
- mtx_lock(&vmmdev_mtx);
+ sx_xlock(&vmmdev_mtx);
sc = vmmdev_lookup(name, cred);
if (sc == NULL || sc->cdev == NULL) {
- mtx_unlock(&vmmdev_mtx);
+ sx_xunlock(&vmmdev_mtx);
return (EINVAL);
}
@@ -798,7 +796,7 @@ vmmdev_lookup_and_destroy(const char *name, struct ucred *cred)
*/
cdev = sc->cdev;
sc->cdev = NULL;
- mtx_unlock(&vmmdev_mtx);
+ sx_xunlock(&vmmdev_mtx);
destroy_dev(cdev);
vmmdev_destroy(sc);
@@ -833,6 +831,7 @@ SYSCTL_PROC(_hw_vmm, OID_AUTO, destroy,
static struct cdevsw vmmdevsw = {
.d_name = "vmmdev",
.d_version = D_VERSION,
+ .d_open = vmmdev_open,
.d_ioctl = vmmdev_ioctl,
.d_mmap_single = vmmdev_mmap_single,
.d_read = vmmdev_rw,
@@ -854,50 +853,43 @@ vmmdev_alloc(struct vm *vm, struct ucred *cred)
static int
vmmdev_create(const char *name, struct ucred *cred)
{
+ struct make_dev_args mda;
struct cdev *cdev;
- struct vmmdev_softc *sc, *sc2;
+ struct vmmdev_softc *sc;
struct vm *vm;
int error;
- mtx_lock(&vmmdev_mtx);
+ sx_xlock(&vmmdev_mtx);
sc = vmmdev_lookup(name, cred);
- mtx_unlock(&vmmdev_mtx);
- if (sc != NULL)
+ if (sc != NULL) {
+ sx_xunlock(&vmmdev_mtx);
return (EEXIST);
+ }
error = vm_create(name, &vm);
- if (error != 0)
+ if (error != 0) {
+ sx_xunlock(&vmmdev_mtx);
return (error);
-
- sc = vmmdev_alloc(vm, cred);
-
- /*
- * Lookup the name again just in case somebody sneaked in when we
- * dropped the lock.
- */
- mtx_lock(&vmmdev_mtx);
- sc2 = vmmdev_lookup(name, cred);
- if (sc2 != NULL) {
- mtx_unlock(&vmmdev_mtx);
- vmmdev_destroy(sc);
- return (EEXIST);
}
- sc->flags |= VSC_LINKED;
+ sc = vmmdev_alloc(vm, cred);
SLIST_INSERT_HEAD(&head, sc, link);
- mtx_unlock(&vmmdev_mtx);
- error = make_dev_p(MAKEDEV_CHECKNAME, &cdev, &vmmdevsw, sc->ucred,
- UID_ROOT, GID_WHEEL, 0600, "vmm/%s", name);
+ make_dev_args_init(&mda);
+ mda.mda_devsw = &vmmdevsw;
+ mda.mda_cr = sc->ucred;
+ mda.mda_uid = UID_ROOT;
+ mda.mda_gid = GID_WHEEL;
+ mda.mda_mode = 0600;
+ mda.mda_si_drv1 = sc;
+ mda.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
+ error = make_dev_s(&mda, &cdev, "vmm/%s", name);
if (error != 0) {
+ sx_xunlock(&vmmdev_mtx);
vmmdev_destroy(sc);
return (error);
}
-
- mtx_lock(&vmmdev_mtx);
sc->cdev = cdev;
- sc->cdev->si_drv1 = sc;
- mtx_unlock(&vmmdev_mtx);
-
+ sx_xunlock(&vmmdev_mtx);
return (0);
}
@@ -990,39 +982,37 @@ static struct cdevsw devmemsw = {
static int
devmem_create_cdev(struct vmmdev_softc *sc, int segid, char *devname)
{
+ struct make_dev_args mda;
struct devmem_softc *dsc;
- struct cdev *cdev;
- const char *vmname;
int error;
- vmname = vm_name(sc->vm);
-
- error = make_dev_p(MAKEDEV_CHECKNAME, &cdev, &devmemsw, sc->ucred,
- UID_ROOT, GID_WHEEL, 0600, "vmm.io/%s.%s", vmname, devname);
- if (error)
- return (error);
+ sx_xlock(&vmmdev_mtx);
dsc = malloc(sizeof(struct devmem_softc), M_VMMDEV, M_WAITOK | M_ZERO);
-
- mtx_lock(&vmmdev_mtx);
- if (sc->cdev == NULL) {
- /* virtual machine is being created or destroyed */
- mtx_unlock(&vmmdev_mtx);
- free(dsc, M_VMMDEV);
- destroy_dev_sched_cb(cdev, NULL, 0);
- return (ENODEV);
- }
-
dsc->segid = segid;
dsc->name = devname;
- dsc->cdev = cdev;
dsc->sc = sc;
SLIST_INSERT_HEAD(&sc->devmem, dsc, link);
- mtx_unlock(&vmmdev_mtx);
- /* The 'cdev' is ready for use after 'si_drv1' is initialized */
- cdev->si_drv1 = dsc;
- return (0);
+ make_dev_args_init(&mda);
+ mda.mda_devsw = &devmemsw;
+ mda.mda_cr = sc->ucred;
+ mda.mda_uid = UID_ROOT;
+ mda.mda_gid = GID_WHEEL;
+ mda.mda_mode = 0600;
+ mda.mda_si_drv1 = dsc;
+ mda.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
+ error = make_dev_s(&mda, &dsc->cdev, "vmm.io/%s.%s", vm_name(sc->vm),
+ devname);
+ if (error != 0) {
+ SLIST_REMOVE(&sc->devmem, dsc, devmem_softc, link);
+ free(dsc->name, M_VMMDEV);
+ free(dsc, M_VMMDEV);
+ }
+
+ sx_xunlock(&vmmdev_mtx);
+
+ return (error);
}
static void
@@ -1030,7 +1020,7 @@ devmem_destroy(void *arg)
{
struct devmem_softc *dsc = arg;
- KASSERT(dsc->cdev, ("%s: devmem cdev already destroyed", __func__));
+ destroy_dev(dsc->cdev);
dsc->cdev = NULL;
dsc->sc = NULL;
}
diff --git a/sys/fs/udf/udf_vnops.c b/sys/fs/udf/udf_vnops.c
index 9c90d7a8a489..45668aae3c90 100644
--- a/sys/fs/udf/udf_vnops.c
+++ b/sys/fs/udf/udf_vnops.c
@@ -800,8 +800,6 @@ udf_readdir(struct vop_readdir_args *a)
*/
ncookies = uio->uio_resid / 8;
cookies = malloc(sizeof(*cookies) * ncookies, M_TEMP, M_WAITOK);
- if (cookies == NULL)
- return (ENOMEM);
uiodir.ncookies = ncookies;
uiodir.cookies = cookies;
uiodir.acookies = 0;
diff --git a/sys/geom/part/g_part.c b/sys/geom/part/g_part.c
index f0807c83dacf..d72efa09c46d 100644
--- a/sys/geom/part/g_part.c
+++ b/sys/geom/part/g_part.c
@@ -130,6 +130,7 @@ struct g_part_alias_list {
{ "solaris-home", G_PART_ALIAS_SOLARIS_HOME },
{ "solaris-altsec", G_PART_ALIAS_SOLARIS_ALTSEC },
{ "solaris-reserved", G_PART_ALIAS_SOLARIS_RESERVED },
+ { "u-boot-env", G_PART_ALIAS_U_BOOT_ENV },
{ "vmware-reserved", G_PART_ALIAS_VMRESERVED },
{ "vmware-vmfs", G_PART_ALIAS_VMFS },
{ "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG },
diff --git a/sys/geom/part/g_part.h b/sys/geom/part/g_part.h
index ffeeca9022fe..13bbb1e4126a 100644
--- a/sys/geom/part/g_part.h
+++ b/sys/geom/part/g_part.h
@@ -103,6 +103,7 @@ enum g_part_alias {
G_PART_ALIAS_SOLARIS_HOME, /* A Solaris /home partition entry. */
G_PART_ALIAS_SOLARIS_ALTSEC, /* A Solaris alternate sector partition entry. */
G_PART_ALIAS_SOLARIS_RESERVED, /* A Solaris reserved partition entry. */
+ G_PART_ALIAS_U_BOOT_ENV, /* A U-Boot environment partition entry. */
G_PART_ALIAS_VMFS, /* A VMware VMFS partition entry */
G_PART_ALIAS_VMKDIAG, /* A VMware vmkDiagnostic partition entry */
G_PART_ALIAS_VMRESERVED, /* A VMware reserved partition entry */
diff --git a/sys/geom/part/g_part_gpt.c b/sys/geom/part/g_part_gpt.c
index b269faee276c..a856a34f99d9 100644
--- a/sys/geom/part/g_part_gpt.c
+++ b/sys/geom/part/g_part_gpt.c
@@ -223,6 +223,7 @@ static struct uuid gpt_uuid_solaris_var = GPT_ENT_TYPE_SOLARIS_VAR;
static struct uuid gpt_uuid_solaris_home = GPT_ENT_TYPE_SOLARIS_HOME;
static struct uuid gpt_uuid_solaris_altsec = GPT_ENT_TYPE_SOLARIS_ALTSEC;
static struct uuid gpt_uuid_solaris_reserved = GPT_ENT_TYPE_SOLARIS_RESERVED;
+static struct uuid gpt_uuid_u_boot_env = GPT_ENT_TYPE_U_BOOT_ENV;
static struct uuid gpt_uuid_unused = GPT_ENT_TYPE_UNUSED;
static struct uuid gpt_uuid_vmfs = GPT_ENT_TYPE_VMFS;
static struct uuid gpt_uuid_vmkdiag = GPT_ENT_TYPE_VMKDIAG;
@@ -295,6 +296,7 @@ static struct g_part_uuid_alias {
{ &gpt_uuid_solaris_home, G_PART_ALIAS_SOLARIS_HOME, 0 },
{ &gpt_uuid_solaris_altsec, G_PART_ALIAS_SOLARIS_ALTSEC, 0 },
{ &gpt_uuid_solaris_reserved, G_PART_ALIAS_SOLARIS_RESERVED, 0 },
+ { &gpt_uuid_u_boot_env, G_PART_ALIAS_U_BOOT_ENV, 0 },
{ &gpt_uuid_vmfs, G_PART_ALIAS_VMFS, 0 },
{ &gpt_uuid_vmkdiag, G_PART_ALIAS_VMKDIAG, 0 },
{ &gpt_uuid_vmreserved, G_PART_ALIAS_VMRESERVED, 0 },
diff --git a/sys/kern/kern_boottrace.c b/sys/kern/kern_boottrace.c
index 1b097e7378ad..5516f3160587 100644
--- a/sys/kern/kern_boottrace.c
+++ b/sys/kern/kern_boottrace.c
@@ -561,9 +561,6 @@ boottrace_resize(u_int newsize)
}
rt.table = realloc(rt.table, newsize * sizeof(struct bt_event),
M_BOOTTRACE, M_WAITOK | M_ZERO);
- if (rt.table == NULL)
- return (ENOMEM);
-
rt.size = newsize;
boottrace_reset("boottrace_resize");
return (0);
diff --git a/sys/kern/kern_fail.c b/sys/kern/kern_fail.c
index 883b664aef0d..258268bb874f 100644
--- a/sys/kern/kern_fail.c
+++ b/sys/kern/kern_fail.c
@@ -479,11 +479,10 @@ fail_point_init(struct fail_point *fp, const char *fmt, ...)
/* Allocate the name and fill it in. */
name = fp_malloc(n + 1, M_WAITOK);
- if (name != NULL) {
- va_start(ap, fmt);
- vsnprintf(name, n + 1, fmt, ap);
- va_end(ap);
- }
+ va_start(ap, fmt);
+ vsnprintf(name, n + 1, fmt, ap);
+ va_end(ap);
+
fp->fp_name = name;
fp->fp_location = "";
fp->fp_flags |= FAIL_POINT_DYNAMIC_NAME;
diff --git a/sys/kern/kern_ntptime.c b/sys/kern/kern_ntptime.c
index 324bcc8ec3a0..45153e0b782c 100644
--- a/sys/kern/kern_ntptime.c
+++ b/sys/kern/kern_ntptime.c
@@ -193,7 +193,7 @@ static l_fp pps_freq; /* scaled frequency offset (ns/s) */
static long pps_fcount; /* frequency accumulator */
static long pps_jitter; /* nominal jitter (ns) */
static long pps_stabil; /* nominal stability (scaled ns/s) */
-static long pps_lastsec; /* time at last calibration (s) */
+static time_t pps_lastsec; /* time at last calibration (s) */
static int pps_valid; /* signal watchdog counter */
static int pps_shift = PPS_FAVG; /* interval duration (s) (shift) */
static int pps_shiftmax = PPS_FAVGDEF; /* max interval duration (s) (shift) */
@@ -749,7 +749,8 @@ hardupdate(long offset /* clock offset (ns) */)
void
hardpps(struct timespec *tsp, long delta_nsec)
{
- long u_sec, u_nsec, v_nsec; /* temps */
+ long u_nsec, v_nsec; /* temps */
+ time_t u_sec;
l_fp ftemp;
NTP_LOCK();
diff --git a/sys/kern/subr_bus.c b/sys/kern/subr_bus.c
index ad31af0d5731..5930d89cbf91 100644
--- a/sys/kern/subr_bus.c
+++ b/sys/kern/subr_bus.c
@@ -5457,8 +5457,6 @@ sysctl_devices(SYSCTL_HANDLER_ARGS)
* Populate the return item, careful not to overflow the buffer.
*/
udev = malloc(sizeof(*udev), M_BUS, M_WAITOK | M_ZERO);
- if (udev == NULL)
- return (ENOMEM);
udev->dv_handle = (uintptr_t)dev;
udev->dv_parent = (uintptr_t)dev->parent;
udev->dv_devflags = dev->devflags;
diff --git a/sys/kern/subr_prf.c b/sys/kern/subr_prf.c
index 92777d848add..2ff00e0511b2 100644
--- a/sys/kern/subr_prf.c
+++ b/sys/kern/subr_prf.c
@@ -664,9 +664,9 @@ kvprintf(char const *fmt, void (*func)(int, void*), void *arg, int radix, va_lis
char *d;
const char *p, *percent, *q;
u_char *up;
- int ch, n;
+ int ch, n, sign;
uintmax_t num;
- int base, lflag, qflag, tmp, width, ladjust, sharpflag, neg, sign, dot;
+ int base, lflag, qflag, tmp, width, ladjust, sharpflag, dot;
int cflag, hflag, jflag, tflag, zflag;
int bconv, dwidth, upper;
char padc;
@@ -697,7 +697,7 @@ kvprintf(char const *fmt, void (*func)(int, void*), void *arg, int radix, va_lis
PCHAR(ch);
}
percent = fmt - 1;
- qflag = 0; lflag = 0; ladjust = 0; sharpflag = 0; neg = 0;
+ qflag = 0; lflag = 0; ladjust = 0; sharpflag = 0;
sign = 0; dot = 0; bconv = 0; dwidth = 0; upper = 0;
cflag = 0; hflag = 0; jflag = 0; tflag = 0; zflag = 0;
reswitch: switch (ch = (u_char)*fmt++) {
@@ -708,7 +708,7 @@ reswitch: switch (ch = (u_char)*fmt++) {
sharpflag = 1;
goto reswitch;
case '+':
- sign = 1;
+ sign = '+';
goto reswitch;
case '-':
ladjust = 1;
@@ -778,7 +778,6 @@ reswitch: switch (ch = (u_char)*fmt++) {
case 'd':
case 'i':
base = 10;
- sign = 1;
goto handle_sign;
case 'h':
if (hflag) {
@@ -969,8 +968,10 @@ reswitch: switch (ch = (u_char)*fmt++) {
goto reswitch;
case 'r':
base = radix;
- if (sign)
+ if (sign) {
+ sign = 0;
goto handle_sign;
+ }
goto handle_nosign;
case 's':
p = va_arg(ap, char *);
@@ -1007,13 +1008,11 @@ reswitch: switch (ch = (u_char)*fmt++) {
goto handle_nosign;
case 'y':
base = 16;
- sign = 1;
goto handle_sign;
case 'z':
zflag = 1;
goto reswitch;
handle_nosign:
- sign = 0;
if (jflag)
num = va_arg(ap, uintmax_t);
else if (qflag)
@@ -1052,11 +1051,11 @@ reswitch: switch (ch = (u_char)*fmt++) {
num = (signed char)va_arg(ap, int);
else
num = va_arg(ap, int);
-number:
- if (sign && (intmax_t)num < 0) {
- neg = 1;
+ if ((intmax_t)num < 0) {
+ sign = '-';
num = -(intmax_t)num;
}
+number:
p = ksprintn(nbuf, num, base, &n, upper);
tmp = 0;
if (sharpflag && num != 0) {
@@ -1065,7 +1064,7 @@ reswitch: switch (ch = (u_char)*fmt++) {
else if (base == 16)
tmp += 2;
}
- if (neg)
+ if (sign)
tmp++;
if (!ladjust && padc == '0')
@@ -1075,8 +1074,8 @@ reswitch: switch (ch = (u_char)*fmt++) {
if (!ladjust)
while (width-- > 0)
PCHAR(' ');
- if (neg)
- PCHAR('-');
+ if (sign)
+ PCHAR(sign);
if (sharpflag && num != 0) {
if (base == 8) {
PCHAR('0');
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index d647a4beeeff..1a8d62e38e1a 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -113,10 +113,10 @@ struct vop_vector default_vnodeops = {
.vop_fsync = VOP_NULL,
.vop_stat = vop_stdstat,
.vop_fdatasync = vop_stdfdatasync,
- .vop_getlowvnode = vop_stdgetlowvnode,
+ .vop_getlowvnode = vop_stdgetlowvnode,
.vop_getpages = vop_stdgetpages,
.vop_getpages_async = vop_stdgetpages_async,
- .vop_getwritemount = vop_stdgetwritemount,
+ .vop_getwritemount = vop_stdgetwritemount,
.vop_inactive = VOP_NULL,
.vop_need_inactive = vop_stdneed_inactive,
.vop_ioctl = vop_stdioctl,
@@ -1060,8 +1060,8 @@ vop_stdadvise(struct vop_advise_args *ap)
{
struct vnode *vp;
struct bufobj *bo;
+ uintmax_t bstart, bend;
daddr_t startn, endn;
- off_t bstart, bend, start, end;
int bsize, error;
vp = ap->a_vp;
@@ -1084,7 +1084,7 @@ vop_stdadvise(struct vop_advise_args *ap)
/*
* Round to block boundaries (and later possibly further to
- * page boundaries). Applications cannot reasonably be aware
+ * page boundaries). Applications cannot reasonably be aware
* of the boundaries, and the rounding must be to expand at
* both extremities to cover enough. It still doesn't cover
* read-ahead. For partial blocks, this gives unnecessary
@@ -1093,7 +1093,8 @@ vop_stdadvise(struct vop_advise_args *ap)
*/
bsize = vp->v_bufobj.bo_bsize;
bstart = rounddown(ap->a_start, bsize);
- bend = roundup(ap->a_end, bsize);
+ bend = ap->a_end;
+ bend = roundup(bend, bsize);
/*
* Deactivate pages in the specified range from the backing VM
@@ -1102,18 +1103,17 @@ vop_stdadvise(struct vop_advise_args *ap)
* below.
*/
if (vp->v_object != NULL) {
- start = trunc_page(bstart);
- end = round_page(bend);
VM_OBJECT_RLOCK(vp->v_object);
- vm_object_page_noreuse(vp->v_object, OFF_TO_IDX(start),
- OFF_TO_IDX(end));
+ vm_object_page_noreuse(vp->v_object,
+ OFF_TO_IDX(trunc_page(bstart)),
+ OFF_TO_IDX(round_page(bend)));
VM_OBJECT_RUNLOCK(vp->v_object);
}
bo = &vp->v_bufobj;
- BO_RLOCK(bo);
startn = bstart / bsize;
endn = bend / bsize;
+ BO_RLOCK(bo);
error = bnoreuselist(&bo->bo_clean, bo, startn, endn);
if (error == 0)
error = bnoreuselist(&bo->bo_dirty, bo, startn, endn);
diff --git a/sys/net/altq/altq_subr.c b/sys/net/altq/altq_subr.c
index 3ade724818dd..534841289611 100644
--- a/sys/net/altq/altq_subr.c
+++ b/sys/net/altq/altq_subr.c
@@ -1327,12 +1327,7 @@ acc_add_filter(classifier, filter, class, phandle)
return (EINVAL);
#endif
- afp = malloc(sizeof(struct acc_filter),
- M_DEVBUF, M_WAITOK);
- if (afp == NULL)
- return (ENOMEM);
- bzero(afp, sizeof(struct acc_filter));
-
+ afp = malloc(sizeof(*afp), M_DEVBUF, M_WAITOK | M_ZERO);
afp->f_filter = *filter;
afp->f_class = class;
diff --git a/sys/net/dummymbuf.c b/sys/net/dummymbuf.c
index 8c46421888ed..d4ba00b13235 100644
--- a/sys/net/dummymbuf.c
+++ b/sys/net/dummymbuf.c
@@ -74,7 +74,7 @@ dmb_sysctl_handle_rules(SYSCTL_HANDLER_ARGS)
char **rulesp = (char **)arg1;
if (req->newptr == NULL) {
- // read only
+ /* read only */
DMB_RULES_SLOCK();
arg1 = *rulesp;
if (arg1 == NULL) {
@@ -84,10 +84,12 @@ dmb_sysctl_handle_rules(SYSCTL_HANDLER_ARGS)
error = sysctl_handle_string(oidp, arg1, arg2, req);
DMB_RULES_SUNLOCK();
} else {
- // read and write
+ /* read and write */
DMB_RULES_XLOCK();
- if (*rulesp == NULL)
- *rulesp = malloc(arg2, M_DUMMYMBUF_RULES, M_WAITOK);
+ if (*rulesp == NULL) {
+ *rulesp = malloc(arg2, M_DUMMYMBUF_RULES,
+ M_WAITOK | M_ZERO);
+ }
arg1 = *rulesp;
error = sysctl_handle_string(oidp, arg1, arg2, req);
DMB_RULES_XUNLOCK();
@@ -99,8 +101,7 @@ dmb_sysctl_handle_rules(SYSCTL_HANDLER_ARGS)
SYSCTL_PROC(_net_dummymbuf, OID_AUTO, rules,
CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_RW | CTLFLAG_VNET,
&VNET_NAME(dmb_rules), RULES_MAXLEN, dmb_sysctl_handle_rules, "A",
- "{inet | inet6 | ethernet} {in | out} [ ];"
- " ...;");
+ "{inet | inet6 | ethernet} {in | out} []; ...;");
/*
* Statistics
diff --git a/sys/net/if.c b/sys/net/if.c
index 9af386f09563..a7029a54229e 100644
--- a/sys/net/if.c
+++ b/sys/net/if.c
@@ -2751,7 +2751,12 @@ ifhwioctl(u_long cmd, struct ifnet *ifp, caddr_t data, struct thread *td)
(ifp->if_flags & IFF_UP) == 0) {
do_ifup = 1;
}
- /* See if permanently promiscuous mode bit is about to flip */
+
+ /*
+ * See if the promiscuous mode or allmulti bits are about to
+ * flip. They require special handling because in-kernel
+ * consumers may independently toggle them.
+ */
if ((ifp->if_flags ^ new_flags) & IFF_PPROMISC) {
if (new_flags & IFF_PPROMISC)
ifp->if_flags |= IFF_PROMISC;
@@ -2762,6 +2767,12 @@ ifhwioctl(u_long cmd, struct ifnet *ifp, caddr_t data, struct thread *td)
((new_flags & IFF_PPROMISC) ?
"enabled" : "disabled"));
}
+ if ((ifp->if_flags ^ new_flags) & IFF_PALLMULTI) {
+ if (new_flags & IFF_PALLMULTI)
+ ifp->if_flags |= IFF_ALLMULTI;
+ else if (ifp->if_amcount == 0)
+ ifp->if_flags &= ~IFF_ALLMULTI;
+ }
ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) |
(new_flags &~ IFF_CANTCHANGE);
if (ifp->if_ioctl) {
@@ -3768,7 +3779,8 @@ int
if_allmulti(struct ifnet *ifp, int onswitch)
{
- return (if_setflag(ifp, IFF_ALLMULTI, 0, &ifp->if_amcount, onswitch));
+ return (if_setflag(ifp, IFF_ALLMULTI, IFF_PALLMULTI, &ifp->if_amcount,
+ onswitch));
}
struct ifmultiaddr *
@@ -5165,12 +5177,6 @@ if_getifaddr(const if_t ifp)
return (ifp->if_addr);
}
-int
-if_getamcount(const if_t ifp)
-{
- return (ifp->if_amcount);
-}
-
int
if_setsendqready(if_t ifp)
{
diff --git a/sys/net/if.h b/sys/net/if.h
index 9c388866d6d6..39b3abba614b 100644
--- a/sys/net/if.h
+++ b/sys/net/if.h
@@ -160,7 +160,7 @@ struct if_data {
#define IFF_STICKYARP 0x100000 /* (n) sticky ARP */
#define IFF_DYING 0x200000 /* (n) interface is winding down */
#define IFF_RENAMING 0x400000 /* (n) interface is being renamed */
-#define IFF_SPARE 0x800000
+#define IFF_PALLMULTI 0x800000 /* (n) user-requested allmulti mode */
#define IFF_NETLINK_1 0x1000000 /* (n) used by netlink */
/*
diff --git a/sys/net/if_ovpn.c b/sys/net/if_ovpn.c
index f6f640a65f61..ee097cfa24b3 100644
--- a/sys/net/if_ovpn.c
+++ b/sys/net/if_ovpn.c
@@ -2115,6 +2115,12 @@ ovpn_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
sc = ifp->if_softc;
+ m = m_unshare(m, M_NOWAIT);
+ if (m == NULL) {
+ OVPN_COUNTER_ADD(sc, lost_data_pkts_out, 1);
+ return (ENOBUFS);
+ }
+
OVPN_RLOCK(sc);
SDT_PROBE1(if_ovpn, tx, transmit, start, m);
@@ -2233,6 +2239,12 @@ ovpn_udp_input(struct mbuf *m, int off, struct inpcb *inp,
M_ASSERTPKTHDR(m);
+ m = m_unshare(m, M_NOWAIT);
+ if (m == NULL) {
+ OVPN_COUNTER_ADD(sc, nomem_data_pkts_in, 1);
+ return (true);
+ }
+
OVPN_COUNTER_ADD(sc, transport_bytes_received, m->m_pkthdr.len - off);
ohdrlen = sizeof(*ohdr) - sizeof(ohdr->auth_tag);
diff --git a/sys/net/if_var.h b/sys/net/if_var.h
index e16e4a94ed71..e293b81476cb 100644
--- a/sys/net/if_var.h
+++ b/sys/net/if_var.h
@@ -675,7 +675,6 @@ u_int if_lladdr_count(if_t);
u_int if_llmaddr_count(if_t);
bool if_maddr_empty(if_t);
-int if_getamcount(const if_t ifp);
struct ifaddr * if_getifaddr(const if_t ifp);
typedef u_int if_addr_cb_t(void *, struct ifaddr *, u_int);
u_int if_foreach_addr_type(if_t ifp, int type, if_addr_cb_t cb, void *cb_arg);
diff --git a/sys/net/if_vlan.c b/sys/net/if_vlan.c
index 01be7ab2d13b..3fbf0c7dc7a8 100644
--- a/sys/net/if_vlan.c
+++ b/sys/net/if_vlan.c
@@ -509,11 +509,6 @@ vlan_growhash(struct ifvlantrunk *trunk, int howmuch)
return;
hash2 = malloc(sizeof(struct ifvlanhead) * n2, M_VLAN, M_WAITOK);
- if (hash2 == NULL) {
- printf("%s: out of memory -- hash size not changed\n",
- __func__);
- return; /* We can live with the old hash table */
- }
for (j = 0; j < n2; j++)
CK_SLIST_INIT(&hash2[j]);
for (i = 0; i < n; i++)
diff --git a/sys/net/pfvar.h b/sys/net/pfvar.h
index 6d08f4076e17..9bd899e9898a 100644
--- a/sys/net/pfvar.h
+++ b/sys/net/pfvar.h
@@ -374,8 +374,8 @@ struct pfi_dynaddr {
mtx_unlock(_s->lock); \
} while (0)
#else
-#define PF_STATE_LOCK(s) mtx_lock(s->lock)
-#define PF_STATE_UNLOCK(s) mtx_unlock(s->lock)
+#define PF_STATE_LOCK(s) mtx_lock((s)->lock)
+#define PF_STATE_UNLOCK(s) mtx_unlock((s)->lock)
#endif
#ifdef INVARIANTS
@@ -940,6 +940,29 @@ struct pf_state_peer {
u_int8_t pad[1];
};
+/* Keep synced with struct pf_udp_endpoint. */
+struct pf_udp_endpoint_cmp {
+ struct pf_addr addr;
+ uint16_t port;
+ sa_family_t af;
+ uint8_t pad[1];
+};
+
+struct pf_udp_endpoint {
+ struct pf_addr addr;
+ uint16_t port;
+ sa_family_t af;
+ uint8_t pad[1];
+
+ struct pf_udp_mapping *mapping;
+ LIST_ENTRY(pf_udp_endpoint) entry;
+};
+
+struct pf_udp_mapping {
+ struct pf_udp_endpoint endpoints[2];
+ u_int refs;
+};
+
/* Keep synced with struct pf_state_key. */
struct pf_state_key_cmp {
struct pf_addr addr[2];
@@ -1069,6 +1092,7 @@ struct pf_kstate {
union pf_krule_ptr nat_rule;
struct pf_addr rt_addr;
struct pf_state_key *key[2]; /* addresses stack and wire */
+ struct pf_udp_mapping *udp_mapping;
struct pfi_kkif *kif;
struct pfi_kkif *orig_kif; /* The real kif, even if we're a floating state (i.e. if == V_pfi_all). */
struct pfi_kkif *rt_kif;
@@ -2126,17 +2150,28 @@ struct pf_idhash {
struct mtx lock;
};
+struct pf_udpendpointhash {
+ LIST_HEAD(, pf_udp_endpoint) endpoints;
+ /* refcont is synchronized on the source endpoint's row lock */
+ struct mtx lock;
+};
+
extern u_long pf_ioctl_maxcount;
VNET_DECLARE(u_long, pf_hashmask);
#define V_pf_hashmask VNET(pf_hashmask)
VNET_DECLARE(u_long, pf_srchashmask);
#define V_pf_srchashmask VNET(pf_srchashmask)
+VNET_DECLARE(u_long, pf_udpendpointhashmask);
+#define V_pf_udpendpointhashmask VNET(pf_udpendpointhashmask)
#define PF_HASHSIZ (131072)
#define PF_SRCHASHSIZ (PF_HASHSIZ/4)
+#define PF_UDPENDHASHSIZ (PF_HASHSIZ/4)
VNET_DECLARE(struct pf_keyhash *, pf_keyhash);
VNET_DECLARE(struct pf_idhash *, pf_idhash);
+VNET_DECLARE(struct pf_udpendpointhash *, pf_udpendpointhash);
#define V_pf_keyhash VNET(pf_keyhash)
#define V_pf_idhash VNET(pf_idhash)
+#define V_pf_udpendpointhash VNET(pf_udpendpointhash)
VNET_DECLARE(struct pf_srchash *, pf_srchash);
#define V_pf_srchash VNET(pf_srchash)
@@ -2211,6 +2246,8 @@ VNET_DECLARE(uma_zone_t, pf_state_z);
#define V_pf_state_z VNET(pf_state_z)
VNET_DECLARE(uma_zone_t, pf_state_key_z);
#define V_pf_state_key_z VNET(pf_state_key_z)
+VNET_DECLARE(uma_zone_t, pf_udp_mapping_z);
+#define V_pf_udp_mapping_z VNET(pf_udp_mapping_z)
VNET_DECLARE(uma_zone_t, pf_state_scrub_z);
#define V_pf_state_scrub_z VNET(pf_state_scrub_z)
@@ -2283,6 +2320,15 @@ extern struct pf_kstate *pf_find_state_all(
extern bool pf_find_state_all_exists(
const struct pf_state_key_cmp *,
u_int);
+extern struct pf_udp_mapping *pf_udp_mapping_find(struct pf_udp_endpoint_cmp
+ *endpoint);
+extern struct pf_udp_mapping *pf_udp_mapping_create(sa_family_t af,
+ struct pf_addr *src_addr, uint16_t src_port,
+ struct pf_addr *nat_addr, uint16_t nat_port);
+extern int pf_udp_mapping_insert(struct pf_udp_mapping
+ *mapping);
+extern void pf_udp_mapping_release(struct pf_udp_mapping
+ *mapping);
extern struct pf_ksrc_node *pf_find_src_node(struct pf_addr *,
struct pf_krule *, sa_family_t,
struct pf_srchash **, bool);
@@ -2576,10 +2622,11 @@ u_short pf_get_translation(struct pf_pdesc *, struct mbuf *,
struct pf_state_key **, struct pf_state_key **,
struct pf_addr *, struct pf_addr *,
uint16_t, uint16_t, struct pf_kanchor_stackframe *,
- struct pf_krule **);
+ struct pf_krule **,
+ struct pf_udp_mapping **udp_mapping);
-struct pf_state_key *pf_state_key_setup(struct pf_pdesc *, struct pf_addr *,
- struct pf_addr *, u_int16_t, u_int16_t);
+struct pf_state_key *pf_state_key_setup(struct pf_pdesc *, struct mbuf *, int,
+ struct pf_addr *, struct pf_addr *, u_int16_t, u_int16_t);
struct pf_state_key *pf_state_key_clone(const struct pf_state_key *);
void pf_rule_to_actions(struct pf_krule *,
struct pf_rule_actions *);
diff --git a/sys/netinet/ip_carp.c b/sys/netinet/ip_carp.c
index e03cd472259e..1960e3405abc 100644
--- a/sys/netinet/ip_carp.c
+++ b/sys/netinet/ip_carp.c
@@ -516,7 +516,7 @@ static int
carp_input(struct mbuf **mp, int *offp, int proto)
{
struct mbuf *m = *mp;
- struct ip *ip = mtod(m, struct ip *);
+ struct ip *ip;
struct vrrpv3_header *vh;
int iplen;
int minlen;
@@ -532,9 +532,6 @@ carp_input(struct mbuf **mp, int *offp, int proto)
return (IPPROTO_DONE);
}
- iplen = ip->ip_hl << 2;
- totlen = ntohs(ip->ip_len);
-
/* Ensure we have enough header to figure out the version. */
if (m->m_pkthdr.len < iplen + sizeof(*vh)) {
CARPSTATS_INC(carps_badlen);
@@ -545,14 +542,15 @@ carp_input(struct mbuf **mp, int *offp, int proto)
return (IPPROTO_DONE);
}
- if (iplen + sizeof(*vh) < m->m_len) {
+ if (m->m_len < iplen + sizeof(*vh)) {
if ((m = m_pullup(m, iplen + sizeof(*vh))) == NULL) {
CARPSTATS_INC(carps_hdrops);
CARP_DEBUG("%s():%d: pullup failed\n", __func__, __LINE__);
return (IPPROTO_DONE);
}
- ip = mtod(m, struct ip *);
}
+ ip = mtod(m, struct ip *);
+ totlen = ntohs(ip->ip_len);
vh = (struct vrrpv3_header *)((char *)ip + iplen);
switch (vh->vrrp_version) {
@@ -581,7 +579,7 @@ carp_input(struct mbuf **mp, int *offp, int proto)
return (IPPROTO_DONE);
}
- if (iplen + minlen < m->m_len) {
+ if (m->m_len < iplen + minlen) {
if ((m = m_pullup(m, iplen + minlen)) == NULL) {
CARPSTATS_INC(carps_hdrops);
CARP_DEBUG("%s():%d: pullup failed\n", __func__, __LINE__);
@@ -596,15 +594,13 @@ carp_input(struct mbuf **mp, int *offp, int proto)
struct carp_header *ch;
/* verify the CARP checksum */
- m->m_data += iplen;
- if (in_cksum(m, totlen - iplen)) {
+ if (in_cksum_skip(m, totlen, iplen)) {
CARPSTATS_INC(carps_badsum);
CARP_DEBUG("%s: checksum failed on %s\n", __func__,
if_name(m->m_pkthdr.rcvif));
m_freem(m);
break;
}
- m->m_data -= iplen;
ch = (struct carp_header *)((char *)ip + iplen);
carp_input_c(m, ch, AF_INET, ip->ip_ttl);
break;
@@ -689,7 +685,7 @@ carp6_input(struct mbuf **mp, int *offp, int proto)
return (IPPROTO_DONE);
}
- if (sizeof (*ip6) + minlen < m->m_len) {
+ if (m->m_len < sizeof(*ip6) + minlen) {
if ((m = m_pullup(m, sizeof(*ip6) + minlen)) == NULL) {
CARPSTATS_INC(carps_hdrops);
CARP_DEBUG("%s():%d: pullup failed\n", __func__, __LINE__);
@@ -704,15 +700,14 @@ carp6_input(struct mbuf **mp, int *offp, int proto)
struct carp_header *ch;
/* verify the CARP checksum */
- m->m_data += *offp;
- if (in_cksum(m, sizeof(struct carp_header))) {
+ if (in_cksum_skip(m, *offp + sizeof(struct carp_header),
+ *offp)) {
CARPSTATS_INC(carps_badsum);
CARP_DEBUG("%s: checksum failed, on %s\n", __func__,
if_name(m->m_pkthdr.rcvif));
m_freem(m);
break;
}
- m->m_data -= *offp;
ch = (struct carp_header *)((char *)ip6 + sizeof(*ip6));
carp_input_c(m, ch, AF_INET6, ip6->ip6_hlim);
break;
diff --git a/sys/netinet/tcp_lro.c b/sys/netinet/tcp_lro.c
index 906e01257a04..10afed17bf3b 100644
--- a/sys/netinet/tcp_lro.c
+++ b/sys/netinet/tcp_lro.c
@@ -83,6 +83,7 @@ static MALLOC_DEFINE(M_LRO, "LRO", "LRO control structures");
static void tcp_lro_rx_done(struct lro_ctrl *lc);
static int tcp_lro_rx_common(struct lro_ctrl *lc, struct mbuf *m,
uint32_t csum, bool use_hash);
+static void tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le);
SYSCTL_NODE(_net_inet_tcp, OID_AUTO, lro, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"TCP LRO");
@@ -1104,7 +1105,7 @@ tcp_lro_condense(struct lro_ctrl *lc, struct lro_entry *le)
}
}
-void
+static void
tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
{
diff --git a/sys/netinet/tcp_lro.h b/sys/netinet/tcp_lro.h
index b4b5e3f811e4..a94eca665eb5 100644
--- a/sys/netinet/tcp_lro.h
+++ b/sys/netinet/tcp_lro.h
@@ -216,7 +216,6 @@ int tcp_lro_init(struct lro_ctrl *);
int tcp_lro_init_args(struct lro_ctrl *, struct ifnet *, unsigned, unsigned);
void tcp_lro_free(struct lro_ctrl *);
void tcp_lro_flush_inactive(struct lro_ctrl *, const struct timeval *);
-void tcp_lro_flush(struct lro_ctrl *, struct lro_entry *);
void tcp_lro_flush_all(struct lro_ctrl *);
extern int (*tcp_lro_flush_tcphpts)(struct lro_ctrl *, struct lro_entry *);
int tcp_lro_rx(struct lro_ctrl *, struct mbuf *, uint32_t);
diff --git a/sys/netinet/tcp_syncache.c b/sys/netinet/tcp_syncache.c
index 33a6a66b7138..d0a7690256f4 100644
--- a/sys/netinet/tcp_syncache.c
+++ b/sys/netinet/tcp_syncache.c
@@ -527,10 +527,16 @@ syncache_timer(void *xsch)
}
NET_EPOCH_ENTER(et);
- syncache_respond(sc, NULL, TH_SYN|TH_ACK);
+ if (syncache_respond(sc, NULL, TH_SYN|TH_ACK) == 0) {
+ syncache_timeout(sc, sch, 0);
+ TCPSTAT_INC(tcps_sndacks);
+ TCPSTAT_INC(tcps_sndtotal);
+ TCPSTAT_INC(tcps_sc_retransmitted);
+ } else {
+ syncache_drop(sc, sch);
+ TCPSTAT_INC(tcps_sc_dropped);
+ }
NET_EPOCH_EXIT(et);
- TCPSTAT_INC(tcps_sc_retransmitted);
- syncache_timeout(sc, sch, 0);
}
if (!TAILQ_EMPTY(&(sch)->sch_bucket))
callout_reset(&(sch)->sch_timer, (sch)->sch_nextc - tick,
@@ -688,7 +694,13 @@ syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th, struct mbuf *m,
"sending challenge ACK\n",
s, __func__,
th->th_seq, sc->sc_irs + 1, sc->sc_wnd);
- syncache_respond(sc, m, TH_ACK);
+ if (syncache_respond(sc, m, TH_ACK) == 0) {
+ TCPSTAT_INC(tcps_sndacks);
+ TCPSTAT_INC(tcps_sndtotal);
+ } else {
+ syncache_drop(sc, sch);
+ TCPSTAT_INC(tcps_sc_dropped);
+ }
}
} else {
if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
@@ -1549,6 +1561,9 @@ syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
syncache_timeout(sc, sch, 1);
TCPSTAT_INC(tcps_sndacks);
TCPSTAT_INC(tcps_sndtotal);
+ } else {
+ syncache_drop(sc, sch);
+ TCPSTAT_INC(tcps_sc_dropped);
}
SCH_UNLOCK(sch);
goto donenoprobe;
diff --git a/sys/netipsec/ipsec_offload.c b/sys/netipsec/ipsec_offload.c
index bbf98ac7a676..19719a8f171b 100644
--- a/sys/netipsec/ipsec_offload.c
+++ b/sys/netipsec/ipsec_offload.c
@@ -69,6 +69,7 @@
static struct mtx ipsec_accel_sav_tmp;
static struct unrhdr *drv_spi_unr;
static struct mtx ipsec_accel_cnt_lock;
+static struct taskqueue *ipsec_accel_tq;
struct ipsec_accel_install_newkey_tq {
struct secasvar *sav;
@@ -97,8 +98,6 @@ struct ifp_handle_sav {
#define IFP_HS_HANDLED 0x00000001
#define IFP_HS_REJECTED 0x00000002
-#define IFP_HS_INPUT 0x00000004
-#define IFP_HS_OUTPUT 0x00000008
#define IFP_HS_MARKER 0x00000010
static CK_LIST_HEAD(, ifp_handle_sav) ipsec_accel_all_sav_handles;
@@ -168,6 +167,11 @@ ipsec_accel_init(void *arg)
mtx_init(&ipsec_accel_cnt_lock, "ipascn", MTX_DEF, 0);
drv_spi_unr = new_unrhdr(IPSEC_ACCEL_DRV_SPI_MIN,
IPSEC_ACCEL_DRV_SPI_MAX, &ipsec_accel_sav_tmp);
+ ipsec_accel_tq = taskqueue_create("ipsec_offload", M_WAITOK,
+ taskqueue_thread_enqueue, &ipsec_accel_tq);
+ (void)taskqueue_start_threads(&ipsec_accel_tq,
+ 1 /* Must be single-threaded */, PWAIT,
+ "ipsec_offload");
ipsec_accel_sa_newkey_p = ipsec_accel_sa_newkey_impl;
ipsec_accel_forget_sav_p = ipsec_accel_forget_sav_impl;
ipsec_accel_spdadd_p = ipsec_accel_spdadd_impl;
@@ -209,6 +213,8 @@ ipsec_accel_fini(void *arg)
clean_unrhdr(drv_spi_unr); /* avoid panic, should go later */
clear_unrhdr(drv_spi_unr);
delete_unrhdr(drv_spi_unr);
+ taskqueue_drain_all(ipsec_accel_tq);
+ taskqueue_free(ipsec_accel_tq);
mtx_destroy(&ipsec_accel_sav_tmp);
mtx_destroy(&ipsec_accel_cnt_lock);
}
@@ -346,7 +352,7 @@ ipsec_accel_sa_newkey_act(void *context, int pending)
/*
* If ipsec_accel_forget_sav() raced with us and set
* the flag, do its work. Its task cannot execute in
- * parallel since taskqueue_thread is single-threaded.
+ * parallel since ipsec_accel taskqueue is single-threaded.
*/
if ((sav->accel_flags & SADB_KEY_ACCEL_DEINST) != 0) {
tqf = (void *)sav->accel_forget_tq;
@@ -386,8 +392,8 @@ ipsec_accel_sa_newkey_impl(struct secasvar *sav)
TASK_INIT(&tq->install_task, 0, ipsec_accel_sa_newkey_act, tq);
tq->sav = sav;
- tq->install_vnet = curthread->td_vnet; /* XXXKIB liveness */
- taskqueue_enqueue(taskqueue_thread, &tq->install_task);
+ tq->install_vnet = curthread->td_vnet;
+ taskqueue_enqueue(ipsec_accel_tq, &tq->install_task);
}
static int
@@ -405,8 +411,7 @@ ipsec_accel_handle_sav(struct secasvar *sav, struct ifnet *ifp,
ihs->drv_spi = drv_spi;
ihs->ifdata = priv;
ihs->flags = flags;
- if ((flags & IFP_HS_OUTPUT) != 0)
- ihs->hdr_ext_size = esp_hdrsiz(sav);
+ ihs->hdr_ext_size = esp_hdrsiz(sav);
mtx_lock(&ipsec_accel_sav_tmp);
CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
if (i->ifp == ifp) {
@@ -511,7 +516,7 @@ ipsec_accel_forget_sav_impl(struct secasvar *sav)
TASK_INIT(&tq->forget_task, 0, ipsec_accel_forget_sav_act, tq);
tq->forget_vnet = curthread->td_vnet;
tq->sav = sav;
- taskqueue_enqueue(taskqueue_thread, &tq->forget_task);
+ taskqueue_enqueue(ipsec_accel_tq, &tq->forget_task);
}
static void
@@ -699,7 +704,7 @@ ipsec_accel_spdadd_impl(struct secpolicy *sp, struct inpcb *inp)
in_pcbref(inp);
TASK_INIT(&tq->adddel_task, 0, ipsec_accel_spdadd_act, sp);
key_addref(sp);
- taskqueue_enqueue(taskqueue_thread, &tq->adddel_task);
+ taskqueue_enqueue(ipsec_accel_tq, &tq->adddel_task);
}
static void
@@ -754,7 +759,7 @@ ipsec_accel_spddel_impl(struct secpolicy *sp)
tq->adddel_vnet = curthread->td_vnet;
TASK_INIT(&tq->adddel_task, 0, ipsec_accel_spddel_act, sp);
key_addref(sp);
- taskqueue_enqueue(taskqueue_thread, &tq->adddel_task);
+ taskqueue_enqueue(ipsec_accel_tq, &tq->adddel_task);
}
static void
@@ -876,7 +881,8 @@ ipsec_accel_output(struct ifnet *ifp, struct mbuf *m, struct inpcb *inp,
}
i = ipsec_accel_is_accel_sav_ptr(sav, ifp);
- if (i == NULL)
+ if (i == NULL || (i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) !=
+ IFP_HS_HANDLED)
goto out;
if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
@@ -1126,7 +1132,7 @@ ipsec_accel_sa_lifetime_op_impl(struct secasvar *sav,
static void
ipsec_accel_sync_imp(void)
{
- taskqueue_drain_all(taskqueue_thread);
+ taskqueue_drain_all(ipsec_accel_tq);
}
static struct mbuf *
diff --git a/sys/netipsec/key.c b/sys/netipsec/key.c
index 5a3e5727bc2e..ad1d6164f158 100644
--- a/sys/netipsec/key.c
+++ b/sys/netipsec/key.c
@@ -8713,6 +8713,9 @@ key_vnet_destroy(void *arg __unused)
}
SAHTREE_WUNLOCK();
+ /* Wait for async work referencing this VNET to finish. */
+ ipsec_accel_sync();
+
key_freesah_flushed(&sahdrainq);
hashdestroy(V_sphashtbl, M_IPSEC_SP, V_sphash_mask);
hashdestroy(V_savhashtbl, M_IPSEC_SA, V_savhash_mask);
diff --git a/sys/netpfil/ipfw/dn_sched_fq_pie.c b/sys/netpfil/ipfw/dn_sched_fq_pie.c
index 632bfd4b7152..06700b0f93af 100644
--- a/sys/netpfil/ipfw/dn_sched_fq_pie.c
+++ b/sys/netpfil/ipfw/dn_sched_fq_pie.c
@@ -744,6 +744,9 @@ pie_enqueue(struct fq_pie_flow *q, struct mbuf* m, struct fq_pie_si *si)
}
if (t != DROP) {
+ if (m->m_pkthdr.rcvif != NULL)
+ m_rcvif_serialize(m);
+
mq_append(&q->mq, m);
fq_update_stats(q, si, len, 0);
return 0;
diff --git a/sys/netpfil/pf/pf.c b/sys/netpfil/pf/pf.c
index acbaf304995f..70220dda935e 100644
--- a/sys/netpfil/pf/pf.c
+++ b/sys/netpfil/pf/pf.c
@@ -283,6 +283,7 @@ VNET_DEFINE_STATIC(uma_zone_t, pf_sources_z);
uma_zone_t pf_mtag_z;
VNET_DEFINE(uma_zone_t, pf_state_z);
VNET_DEFINE(uma_zone_t, pf_state_key_z);
+VNET_DEFINE(uma_zone_t, pf_udp_mapping_z);
VNET_DEFINE(struct unrhdr64, pf_stateid);
@@ -330,7 +331,10 @@ static int pf_create_state(struct pf_krule *, struct pf_krule *,
struct pf_state_key *, struct mbuf *, int,
u_int16_t, u_int16_t, int *, struct pfi_kkif *,
struct pf_kstate **, int, u_int16_t, u_int16_t,
- int, struct pf_krule_slist *);
+ int, struct pf_krule_slist *, struct pf_udp_mapping *);
+static int pf_state_key_addr_setup(struct pf_pdesc *, struct mbuf *,
+ int, struct pf_state_key_cmp *, int, struct pf_addr *,
+ int, struct pf_addr *, int);
static int pf_test_fragment(struct pf_krule **, struct pfi_kkif *,
struct mbuf *, void *, struct pf_pdesc *,
struct pf_krule **, struct pf_kruleset **);
@@ -347,7 +351,7 @@ static int pf_test_state_udp(struct pf_kstate **,
void *, struct pf_pdesc *);
int pf_icmp_state_lookup(struct pf_state_key_cmp *,
struct pf_pdesc *, struct pf_kstate **, struct mbuf *,
- int, struct pfi_kkif *, u_int16_t, u_int16_t,
+ int, int, struct pfi_kkif *, u_int16_t, u_int16_t,
int, int *, int, int);
static int pf_test_state_icmp(struct pf_kstate **,
struct pfi_kkif *, struct mbuf *, int,
@@ -401,7 +405,7 @@ extern struct proc *pf_purge_proc;
VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);
-enum { PF_ICMP_MULTI_NONE, PF_ICMP_MULTI_SOLICITED, PF_ICMP_MULTI_LINK };
+enum { PF_ICMP_MULTI_NONE, PF_ICMP_MULTI_LINK };
#define PACKET_UNDO_NAT(_m, _pd, _off, _s) \
do { \
@@ -490,22 +494,29 @@ MALLOC_DEFINE(M_PF_RULE_ITEM, "pf_krule_item", "pf(4) rule items");
VNET_DEFINE(struct pf_keyhash *, pf_keyhash);
VNET_DEFINE(struct pf_idhash *, pf_idhash);
VNET_DEFINE(struct pf_srchash *, pf_srchash);
+VNET_DEFINE(struct pf_udpendpointhash *, pf_udpendpointhash);
+VNET_DEFINE(struct pf_udpendpointmapping *, pf_udpendpointmapping);
SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"pf(4)");
VNET_DEFINE(u_long, pf_hashmask);
VNET_DEFINE(u_long, pf_srchashmask);
+VNET_DEFINE(u_long, pf_udpendpointhashmask);
VNET_DEFINE_STATIC(u_long, pf_hashsize);
#define V_pf_hashsize VNET(pf_hashsize)
VNET_DEFINE_STATIC(u_long, pf_srchashsize);
#define V_pf_srchashsize VNET(pf_srchashsize)
+VNET_DEFINE_STATIC(u_long, pf_udpendpointhashsize);
+#define V_pf_udpendpointhashsize VNET(pf_udpendpointhashsize)
u_long pf_ioctl_maxcount = 65535;
SYSCTL_ULONG(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN,
&VNET_NAME(pf_hashsize), 0, "Size of pf(4) states hashtable");
SYSCTL_ULONG(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN,
&VNET_NAME(pf_srchashsize), 0, "Size of pf(4) source nodes hashtable");
+SYSCTL_ULONG(_net_pf, OID_AUTO, udpendpoint_hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN,
+ &VNET_NAME(pf_udpendpointhashsize), 0, "Size of pf(4) endpoint hashtable");
SYSCTL_ULONG(_net_pf, OID_AUTO, request_maxcount, CTLFLAG_RWTUN,
&pf_ioctl_maxcount, 0, "Maximum number of tables, addresses, ... in a single ioctl() call");
@@ -696,6 +707,17 @@ pf_hashsrc(struct pf_addr *addr, sa_family_t af)
return (h & V_pf_srchashmask);
}
+static inline uint32_t
+pf_hashudpendpoint(struct pf_udp_endpoint *endpoint)
+{
+ uint32_t h;
+
+ h = murmur3_32_hash32((uint32_t *)endpoint,
+ sizeof(struct pf_udp_endpoint_cmp)/sizeof(uint32_t),
+ V_pf_hashseed);
+ return (h & V_pf_udpendpointhashmask);
+}
+
#ifdef ALTQ
static int
pf_state_hash(struct pf_kstate *s)
@@ -1083,12 +1105,15 @@ pf_initialize(void)
struct pf_keyhash *kh;
struct pf_idhash *ih;
struct pf_srchash *sh;
+ struct pf_udpendpointhash *uh;
u_int i;
if (V_pf_hashsize == 0 || !powerof2(V_pf_hashsize))
V_pf_hashsize = PF_HASHSIZ;
if (V_pf_srchashsize == 0 || !powerof2(V_pf_srchashsize))
V_pf_srchashsize = PF_SRCHASHSIZ;
+ if (V_pf_udpendpointhashsize == 0 || !powerof2(V_pf_udpendpointhashsize))
+ V_pf_udpendpointhashsize = PF_UDPENDHASHSIZ;
V_pf_hashseed = arc4random();
@@ -1151,6 +1176,30 @@ pf_initialize(void)
for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++)
mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF);
+
+ /* UDP endpoint mappings. */
+ V_pf_udp_mapping_z = uma_zcreate("pf UDP mappings",
+ sizeof(struct pf_udp_mapping), NULL, NULL, NULL, NULL,
+ UMA_ALIGN_PTR, 0);
+ V_pf_udpendpointhash = mallocarray(V_pf_udpendpointhashsize,
+ sizeof(struct pf_udpendpointhash), M_PFHASH, M_NOWAIT | M_ZERO);
+ if (V_pf_udpendpointhash == NULL) {
+ printf("pf: Unable to allocate memory for "
+ "udpendpoint_hashsize %lu.\n", V_pf_udpendpointhashsize);
+
+ V_pf_udpendpointhashsize = PF_UDPENDHASHSIZ;
+ V_pf_udpendpointhash = mallocarray(V_pf_udpendpointhashsize,
+ sizeof(struct pf_udpendpointhash), M_PFHASH, M_WAITOK | M_ZERO);
+ }
+
+ V_pf_udpendpointhashmask = V_pf_udpendpointhashsize - 1;
+ for (i = 0, uh = V_pf_udpendpointhash;
+ i <= V_pf_udpendpointhashmask;
+ i++, uh++) {
+ mtx_init(&uh->lock, "pf_udpendpointhash", NULL,
+ MTX_DEF | MTX_DUPOK);
+ }
+
/* ALTQ */
TAILQ_INIT(&V_pf_altqs[0]);
TAILQ_INIT(&V_pf_altqs[1]);
@@ -1184,10 +1233,12 @@ pf_cleanup(void)
struct pf_keyhash *kh;
struct pf_idhash *ih;
struct pf_srchash *sh;
+ struct pf_udpendpointhash *uh;
struct pf_send_entry *pfse, *next;
u_int i;
- for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= V_pf_hashmask;
+ for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash;
+ i <= V_pf_hashmask;
i++, kh++, ih++) {
KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty",
__func__));
@@ -1206,6 +1257,15 @@ pf_cleanup(void)
}
free(V_pf_srchash, M_PFHASH);
+ for (i = 0, uh = V_pf_udpendpointhash;
+ i <= V_pf_udpendpointhashmask;
+ i++, uh++) {
+ KASSERT(LIST_EMPTY(&uh->endpoints),
+ ("%s: udp endpoint hash not empty", __func__));
+ mtx_destroy(&uh->lock);
+ }
+ free(V_pf_udpendpointhash, M_PFHASH);
+
STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) {
m_freem(pfse->pfse_m);
free(pfse, M_PFTEMP);
@@ -1215,6 +1275,7 @@ pf_cleanup(void)
uma_zdestroy(V_pf_sources_z);
uma_zdestroy(V_pf_state_z);
uma_zdestroy(V_pf_state_key_z);
+ uma_zdestroy(V_pf_udp_mapping_z);
}
static int
@@ -1495,9 +1556,66 @@ pf_state_key_ctor(void *mem, int size, void *arg, int flags)
return (0);
}
+static int
+pf_state_key_addr_setup(struct pf_pdesc *pd, struct mbuf *m, int off,
+ struct pf_state_key_cmp *key, int sidx, struct pf_addr *saddr,
+ int didx, struct pf_addr *daddr, int multi)
+{
+#ifdef INET6
+ struct nd_neighbor_solicit nd;
+ struct pf_addr *target;
+ u_short action, reason;
+
+ if (pd->af == AF_INET || pd->proto != IPPROTO_ICMPV6)
+ goto copy;
+
+ switch (pd->hdr.icmp6.icmp6_type) {
+ case ND_NEIGHBOR_SOLICIT:
+ if (multi)
+ return (-1);
+ if (!pf_pull_hdr(m, off, &nd, sizeof(nd), &action, &reason, pd->af))
+ return (-1);
+ target = (struct pf_addr *)&nd.nd_ns_target;
+ daddr = target;
+ break;
+ case ND_NEIGHBOR_ADVERT:
+ if (multi)
+ return (-1);
+ if (!pf_pull_hdr(m, off, &nd, sizeof(nd), &action, &reason, pd->af))
+ return (-1);
+ target = (struct pf_addr *)&nd.nd_ns_target;
+ saddr = target;
+ if (IN6_IS_ADDR_MULTICAST(&pd->dst->v6)) {
+ key->addr[didx].addr32[0] = 0;
+ key->addr[didx].addr32[1] = 0;
+ key->addr[didx].addr32[2] = 0;
+ key->addr[didx].addr32[3] = 0;
+ daddr = NULL; /* overwritten */
+ }
+ break;
+ default:
+ if (multi == PF_ICMP_MULTI_LINK) {
+ key->addr[sidx].addr32[0] = IPV6_ADDR_INT32_MLL;
+ key->addr[sidx].addr32[1] = 0;
+ key->addr[sidx].addr32[2] = 0;
+ key->addr[sidx].addr32[3] = IPV6_ADDR_INT32_ONE;
+ saddr = NULL; /* overwritten */
+ }
+ }
+copy:
+#endif
+ if (saddr)
+ PF_ACPY(&key->addr[sidx], saddr, pd->af);
+ if (daddr)
+ PF_ACPY(&key->addr[didx], daddr, pd->af);
+
+ return (0);
+}
+
struct pf_state_key *
-pf_state_key_setup(struct pf_pdesc *pd, struct pf_addr *saddr,
- struct pf_addr *daddr, u_int16_t sport, u_int16_t dport)
+pf_state_key_setup(struct pf_pdesc *pd, struct mbuf *m, int off,
+ struct pf_addr *saddr, struct pf_addr *daddr, u_int16_t sport,
+ u_int16_t dport)
{
struct pf_state_key *sk;
@@ -1505,8 +1623,12 @@ pf_state_key_setup(struct pf_pdesc *pd, struct pf_addr *saddr,
if (sk == NULL)
return (NULL);
- PF_ACPY(&sk->addr[pd->sidx], saddr, pd->af);
- PF_ACPY(&sk->addr[pd->didx], daddr, pd->af);
+ if (pf_state_key_addr_setup(pd, m, off, (struct pf_state_key_cmp *)sk,
+ pd->sidx, pd->src, pd->didx, pd->dst, 0)) {
+ uma_zfree(V_pf_state_key_z, sk);
+ return (NULL);
+ }
+
sk->port[pd->sidx] = sport;
sk->port[pd->didx] = dport;
sk->proto = pd->proto;
@@ -1743,6 +1865,123 @@ pf_find_state_all_exists(const struct pf_state_key_cmp *key, u_int dir)
return (false);
}
+struct pf_udp_mapping *
+pf_udp_mapping_create(sa_family_t af, struct pf_addr *src_addr, uint16_t src_port,
+ struct pf_addr *nat_addr, uint16_t nat_port)
+{
+ struct pf_udp_mapping *mapping;
+
+ mapping = uma_zalloc(V_pf_udp_mapping_z, M_NOWAIT | M_ZERO);
+ if (mapping == NULL)
+ return (NULL);
+ PF_ACPY(&mapping->endpoints[0].addr, src_addr, af);
+ mapping->endpoints[0].port = src_port;
+ mapping->endpoints[0].af = af;
+ mapping->endpoints[0].mapping = mapping;
+ PF_ACPY(&mapping->endpoints[1].addr, nat_addr, af);
+ mapping->endpoints[1].port = nat_port;
+ mapping->endpoints[1].af = af;
+ mapping->endpoints[1].mapping = mapping;
+ refcount_init(&mapping->refs, 1);
+ return (mapping);
+}
+
+int
+pf_udp_mapping_insert(struct pf_udp_mapping *mapping)
+{
+ struct pf_udpendpointhash *h0, *h1;
+ struct pf_udp_endpoint *endpoint;
+ int ret = EEXIST;
+
+ h0 = &V_pf_udpendpointhash[pf_hashudpendpoint(&mapping->endpoints[0])];
+ h1 = &V_pf_udpendpointhash[pf_hashudpendpoint(&mapping->endpoints[1])];
+ if (h0 == h1) {
+ PF_HASHROW_LOCK(h0);
+ } else if (h0 < h1) {
+ PF_HASHROW_LOCK(h0);
+ PF_HASHROW_LOCK(h1);
+ } else {
+ PF_HASHROW_LOCK(h1);
+ PF_HASHROW_LOCK(h0);
+ }
+
+ LIST_FOREACH(endpoint, &h0->endpoints, entry) {
+ if (bcmp(endpoint, &mapping->endpoints[0],
+ sizeof(struct pf_udp_endpoint_cmp)) == 0)
+ break;
+ }
+ if (endpoint != NULL)
+ goto cleanup;
+ LIST_FOREACH(endpoint, &h1->endpoints, entry) {
+ if (bcmp(endpoint, &mapping->endpoints[1],
+ sizeof(struct pf_udp_endpoint_cmp)) == 0)
+ break;
+ }
+ if (endpoint != NULL)
+ goto cleanup;
+ LIST_INSERT_HEAD(&h0->endpoints, &mapping->endpoints[0], entry);
+ LIST_INSERT_HEAD(&h1->endpoints, &mapping->endpoints[1], entry);
+ ret = 0;
+
+cleanup:
+ if (h0 != h1) {
+ PF_HASHROW_UNLOCK(h0);
+ PF_HASHROW_UNLOCK(h1);
+ } else {
+ PF_HASHROW_UNLOCK(h0);
+ }
+ return (ret);
+}
+
+void
+pf_udp_mapping_release(struct pf_udp_mapping *mapping)
+{
+ /* refcount is synchronized on the source endpoint's row lock */
+ struct pf_udpendpointhash *h0, *h1;
+
+ if (mapping == NULL)
+ return;
+
+ h0 = &V_pf_udpendpointhash[pf_hashudpendpoint(&mapping->endpoints[0])];
+ PF_HASHROW_LOCK(h0);
+ if (refcount_release(&mapping->refs)) {
+ LIST_REMOVE(&mapping->endpoints[0], entry);
+ PF_HASHROW_UNLOCK(h0);
+ h1 = &V_pf_udpendpointhash[pf_hashudpendpoint(&mapping->endpoints[1])];
+ PF_HASHROW_LOCK(h1);
+ LIST_REMOVE(&mapping->endpoints[1], entry);
+ PF_HASHROW_UNLOCK(h1);
+
+ uma_zfree(V_pf_udp_mapping_z, mapping);
+ } else {
+ PF_HASHROW_UNLOCK(h0);
+ }
+}
+
+
+struct pf_udp_mapping *
+pf_udp_mapping_find(struct pf_udp_endpoint_cmp *key)
+{
+ struct pf_udpendpointhash *uh;
+ struct pf_udp_endpoint *endpoint;
+
+ uh = &V_pf_udpendpointhash[pf_hashudpendpoint((struct pf_udp_endpoint*)key)];
+
+ PF_HASHROW_LOCK(uh);
+ LIST_FOREACH(endpoint, &uh->endpoints, entry) {
+ if (bcmp(endpoint, key, sizeof(struct pf_udp_endpoint_cmp)) == 0 &&
+ bcmp(endpoint, &endpoint->mapping->endpoints[0],
+ sizeof(struct pf_udp_endpoint_cmp)) == 0)
+ break;
+ }
+ if (endpoint == NULL) {
+ PF_HASHROW_UNLOCK(uh);
+ return (NULL);
+ }
+ refcount_acquire(&endpoint->mapping->refs);
+ PF_HASHROW_UNLOCK(uh);
+ return (endpoint->mapping);
+}
/* END state table stuff */
static void
@@ -2359,6 +2598,9 @@ pf_unlink_state(struct pf_kstate *s)
PF_HASHROW_UNLOCK(ih);
pf_detach_state(s);
+
+ pf_udp_mapping_release(s->udp_mapping);
+
/* pf_state_insert() initialises refs to 2 */
return (pf_release_staten(s, 2));
}
@@ -4622,6 +4864,7 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm, struct pfi_kkif *kif,
u_int16_t bproto_sum = 0, bip_sum = 0;
u_int8_t icmptype = 0, icmpcode = 0;
struct pf_kanchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
+ struct pf_udp_mapping *udp_mapping = NULL;
PF_RULES_RASSERT();
@@ -4696,7 +4939,7 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm, struct pfi_kkif *kif,
/* check packet for BINAT/NAT/RDR */
transerror = pf_get_translation(pd, m, off, kif, &nsn, &sk,
- &nk, saddr, daddr, sport, dport, anchor_stack, &nr);
+ &nk, saddr, daddr, sport, dport, anchor_stack, &nr, &udp_mapping);
switch (transerror) {
default:
/* A translation error occurred. */
@@ -4994,8 +5237,9 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm, struct pfi_kkif *kif,
int action;
action = pf_create_state(r, nr, a, pd, nsn, nk, sk, m, off,
sport, dport, &rewrite, kif, sm, tag, bproto_sum, bip_sum,
- hdrlen, &match_rules);
+ hdrlen, &match_rules, udp_mapping);
if (action != PF_PASS) {
+ pf_udp_mapping_release(udp_mapping);
if (action == PF_DROP &&
(r->rule_flag & PFRULE_RETURN))
pf_return(r, nr, pd, sk, off, m, th, kif,
@@ -5011,6 +5255,7 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm, struct pfi_kkif *kif,
uma_zfree(V_pf_state_key_z, sk);
uma_zfree(V_pf_state_key_z, nk);
+ pf_udp_mapping_release(udp_mapping);
}
/* copy back packet headers if we performed NAT operations */
@@ -5038,6 +5283,8 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm, struct pfi_kkif *kif,
uma_zfree(V_pf_state_key_z, sk);
uma_zfree(V_pf_state_key_z, nk);
+ pf_udp_mapping_release(udp_mapping);
+
return (PF_DROP);
}
@@ -5047,7 +5294,7 @@ pf_create_state(struct pf_krule *r, struct pf_krule *nr, struct pf_krule *a,
struct pf_state_key *sk, struct mbuf *m, int off, u_int16_t sport,
u_int16_t dport, int *rewrite, struct pfi_kkif *kif, struct pf_kstate **sm,
int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen,
- struct pf_krule_slist *match_rules)
+ struct pf_krule_slist *match_rules, struct pf_udp_mapping *udp_mapping)
{
struct pf_kstate *s = NULL;
struct pf_ksrc_node *sn = NULL;
@@ -5210,7 +5457,7 @@ pf_create_state(struct pf_krule *r, struct pf_krule *nr, struct pf_krule *a,
if (nr == NULL) {
KASSERT((sk == NULL && nk == NULL), ("%s: nr %p sk %p, nk %p",
__func__, nr, sk, nk));
- sk = pf_state_key_setup(pd, pd->src, pd->dst, sport, dport);
+ sk = pf_state_key_setup(pd, m, off, pd->src, pd->dst, sport, dport);
if (sk == NULL)
goto csfailed;
nk = sk;
@@ -5264,6 +5511,8 @@ pf_create_state(struct pf_krule *r, struct pf_krule *nr, struct pf_krule *a,
return (PF_SYNPROXY_DROP);
}
+ s->udp_mapping = udp_mapping;
+
return (PF_PASS);
csfailed:
@@ -6655,9 +6904,9 @@ pf_multihome_scan_asconf(struct mbuf *m, int start, int len,
int
pf_icmp_state_lookup(struct pf_state_key_cmp *key, struct pf_pdesc *pd,
- struct pf_kstate **state, struct mbuf *m, int direction, struct pfi_kkif *kif,
- u_int16_t icmpid, u_int16_t type, int icmp_dir, int *iidx, int multi,
- int inner)
+ struct pf_kstate **state, struct mbuf *m, int off, int direction,
+ struct pfi_kkif *kif, u_int16_t icmpid, u_int16_t type, int icmp_dir,
+ int *iidx, int multi, int inner)
{
key->af = pd->af;
key->proto = pd->proto;
@@ -6670,28 +6919,15 @@ pf_icmp_state_lookup(struct pf_state_key_cmp *key, struct pf_pdesc *pd,
key->port[pd->sidx] = type;
key->port[pd->didx] = icmpid;
}
- if (pd->af == AF_INET6 && multi != PF_ICMP_MULTI_NONE) {
- switch (multi) {
- case PF_ICMP_MULTI_SOLICITED:
- key->addr[pd->sidx].addr32[0] = IPV6_ADDR_INT32_MLL;
- key->addr[pd->sidx].addr32[1] = 0;
- key->addr[pd->sidx].addr32[2] = IPV6_ADDR_INT32_ONE;
- key->addr[pd->sidx].addr32[3] = pd->src->addr32[3];
- key->addr[pd->sidx].addr8[12] = 0xff;
- break;
- case PF_ICMP_MULTI_LINK:
- key->addr[pd->sidx].addr32[0] = IPV6_ADDR_INT32_MLL;
- key->addr[pd->sidx].addr32[1] = 0;
- key->addr[pd->sidx].addr32[2] = 0;
- key->addr[pd->sidx].addr32[3] = IPV6_ADDR_INT32_ONE;
- break;
- }
- } else
- PF_ACPY(&key->addr[pd->sidx], pd->src, key->af);
- PF_ACPY(&key->addr[pd->didx], pd->dst, key->af);
+ if (pf_state_key_addr_setup(pd, m, off, key, pd->sidx, pd->src,
+ pd->didx, pd->dst, multi))
+ return (PF_DROP);
STATE_LOOKUP(kif, key, *state, pd);
+ if ((*state)->state_flags & PFSTATE_SLOPPY)
+ return (-1);
+
/* Is this ICMP message flowing in right direction? */
if ((*state)->rule.ptr->type &&
(((!inner && (*state)->direction == direction) ||
@@ -6703,6 +6939,8 @@ pf_icmp_state_lookup(struct pf_state_key_cmp *key, struct pf_pdesc *pd,
pf_print_state(*state);
printf("\n");
}
+ PF_STATE_UNLOCK(*state);
+ *state = NULL;
return (PF_DROP);
}
return (-1);
@@ -6751,19 +6989,20 @@ pf_test_state_icmp(struct pf_kstate **state, struct pfi_kkif *kif,
* ICMP query/reply message not related to a TCP/UDP packet.
* Search for an ICMP state.
*/
- ret = pf_icmp_state_lookup(&key, pd, state, m, pd->dir,
+ ret = pf_icmp_state_lookup(&key, pd, state, m, off, pd->dir,
kif, virtual_id, virtual_type, icmp_dir, &iidx,
PF_ICMP_MULTI_NONE, 0);
if (ret >= 0) {
+ MPASS(*state == NULL);
if (ret == PF_DROP && pd->af == AF_INET6 &&
icmp_dir == PF_OUT) {
- if (*state != NULL)
- PF_STATE_UNLOCK((*state));
- ret = pf_icmp_state_lookup(&key, pd, state, m,
+ ret = pf_icmp_state_lookup(&key, pd, state, m, off,
pd->dir, kif, virtual_id, virtual_type,
icmp_dir, &iidx, multi, 0);
- if (ret >= 0)
+ if (ret >= 0) {
+ MPASS(*state == NULL);
return (ret);
+ }
} else
return (ret);
}
@@ -7167,11 +7406,13 @@ pf_test_state_icmp(struct pf_kstate **state, struct pfi_kkif *kif,
pf_icmp_mapping(&pd2, iih->icmp_type,
&icmp_dir, &multi, &virtual_id, &virtual_type);
- ret = pf_icmp_state_lookup(&key, &pd2, state, m,
+ ret = pf_icmp_state_lookup(&key, &pd2, state, m, off,
pd2.dir, kif, virtual_id, virtual_type,
icmp_dir, &iidx, PF_ICMP_MULTI_NONE, 1);
- if (ret >= 0)
+ if (ret >= 0) {
+ MPASS(*state == NULL);
return (ret);
+ }
/* translate source/destination address, if necessary */
if ((*state)->key[PF_SK_WIRE] !=
@@ -7222,20 +7463,21 @@ pf_test_state_icmp(struct pf_kstate **state, struct pfi_kkif *kif,
pf_icmp_mapping(&pd2, iih->icmp6_type,
&icmp_dir, &multi, &virtual_id, &virtual_type);
- ret = pf_icmp_state_lookup(&key, &pd2, state, m,
+ ret = pf_icmp_state_lookup(&key, &pd2, state, m, off,
pd->dir, kif, virtual_id, virtual_type,
icmp_dir, &iidx, PF_ICMP_MULTI_NONE, 1);
if (ret >= 0) {
- if (ret == PF_DROP && pd->af == AF_INET6 &&
+ MPASS(*state == NULL);
+ if (ret == PF_DROP && pd2.af == AF_INET6 &&
icmp_dir == PF_OUT) {
- if (*state != NULL)
- PF_STATE_UNLOCK((*state));
- ret = pf_icmp_state_lookup(&key, pd,
- state, m, pd->dir, kif,
+ ret = pf_icmp_state_lookup(&key, &pd2,
+ state, m, off, pd->dir, kif,
virtual_id, virtual_type,
icmp_dir, &iidx, multi, 1);
- if (ret >= 0)
+ if (ret >= 0) {
+ MPASS(*state == NULL);
return (ret);
+ }
} else
return (ret);
}
diff --git a/sys/netpfil/pf/pf.h b/sys/netpfil/pf/pf.h
index a12ee4317d5d..16cc9c0c8580 100644
--- a/sys/netpfil/pf/pf.h
+++ b/sys/netpfil/pf/pf.h
@@ -129,6 +129,7 @@ enum { PF_ADDR_ADDRMASK, PF_ADDR_NOROUTE, PF_ADDR_DYNIFTL,
PF_ADDR_RANGE };
#define PF_POOL_TYPEMASK 0x0f
#define PF_POOL_STICKYADDR 0x20
+#define PF_POOL_ENDPI 0x40
#define PF_WSCALE_FLAG 0x80
#define PF_WSCALE_MASK 0x0f
diff --git a/sys/netpfil/pf/pf_ioctl.c b/sys/netpfil/pf/pf_ioctl.c
index fc6e76204c8a..8d6e642d4b34 100644
--- a/sys/netpfil/pf/pf_ioctl.c
+++ b/sys/netpfil/pf/pf_ioctl.c
@@ -2823,9 +2823,6 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
/* Copy the request in */
packed = malloc(nv->len, M_NVLIST, M_WAITOK);
- if (packed == NULL)
- ERROUT(ENOMEM);
-
error = copyin(nv->data, packed, nv->len);
if (error)
ERROUT(error);
@@ -2903,9 +2900,6 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
ERROUT(ENOMEM);
nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
- if (nvlpacked == NULL)
- ERROUT(ENOMEM);
-
error = copyin(nv->data, nvlpacked, nv->len);
if (error)
ERROUT(error);
@@ -3003,9 +2997,6 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
ERROUT(ENOMEM);
nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
- if (nvlpacked == NULL)
- ERROUT(ENOMEM);
-
error = copyin(nv->data, nvlpacked, nv->len);
if (error)
ERROUT(error);
@@ -3036,8 +3027,6 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
}
rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
- if (rule == NULL)
- ERROUT(ENOMEM);
rule->timestamp = NULL;
error = pf_nveth_rule_to_keth_rule(nvl, rule);
@@ -3136,9 +3125,6 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
ERROUT(ENOMEM);
nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
- if (nvlpacked == NULL)
- ERROUT(ENOMEM);
-
error = copyin(nv->data, nvlpacked, nv->len);
if (error)
ERROUT(error);
@@ -3214,9 +3200,6 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
ERROUT(ENOMEM);
nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
- if (nvlpacked == NULL)
- ERROUT(ENOMEM);
-
error = copyin(nv->data, nvlpacked, nv->len);
if (error)
ERROUT(error);
@@ -3409,9 +3392,6 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
/* Copy the request in */
nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
- if (nvlpacked == NULL)
- ERROUT(ENOMEM);
-
error = copyin(nv->data, nvlpacked, nv->len);
if (error)
ERROUT(error);
@@ -6009,9 +5989,6 @@ pf_keepcounters(struct pfioc_nv *nv)
ERROUT(ENOMEM);
nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
- if (nvlpacked == NULL)
- ERROUT(ENOMEM);
-
error = copyin(nv->data, nvlpacked, nv->len);
if (error)
ERROUT(error);
@@ -6137,9 +6114,6 @@ pf_killstates_nv(struct pfioc_nv *nv)
ERROUT(ENOMEM);
nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
- if (nvlpacked == NULL)
- ERROUT(ENOMEM);
-
error = copyin(nv->data, nvlpacked, nv->len);
if (error)
ERROUT(error);
@@ -6198,9 +6172,6 @@ pf_clearstates_nv(struct pfioc_nv *nv)
ERROUT(ENOMEM);
nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
- if (nvlpacked == NULL)
- ERROUT(ENOMEM);
-
error = copyin(nv->data, nvlpacked, nv->len);
if (error)
ERROUT(error);
@@ -6259,9 +6230,6 @@ pf_getstate(struct pfioc_nv *nv)
ERROUT(ENOMEM);
nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
- if (nvlpacked == NULL)
- ERROUT(ENOMEM);
-
error = copyin(nv->data, nvlpacked, nv->len);
if (error)
ERROUT(error);
diff --git a/sys/netpfil/pf/pf_lb.c b/sys/netpfil/pf/pf_lb.c
index 68fc76233dab..cdd68aaf5dab 100644
--- a/sys/netpfil/pf/pf_lb.c
+++ b/sys/netpfil/pf/pf_lb.c
@@ -62,7 +62,8 @@ static struct pf_krule *pf_match_translation(struct pf_pdesc *, struct mbuf *,
uint16_t, int, struct pf_kanchor_stackframe *);
static int pf_get_sport(sa_family_t, uint8_t, struct pf_krule *,
struct pf_addr *, uint16_t, struct pf_addr *, uint16_t, struct pf_addr *,
- uint16_t *, uint16_t, uint16_t, struct pf_ksrc_node **);
+ uint16_t *, uint16_t, uint16_t, struct pf_ksrc_node **,
+ struct pf_udp_mapping **);
#define mix(a,b,c) \
do { \
@@ -216,14 +217,47 @@ static int
pf_get_sport(sa_family_t af, u_int8_t proto, struct pf_krule *r,
struct pf_addr *saddr, uint16_t sport, struct pf_addr *daddr,
uint16_t dport, struct pf_addr *naddr, uint16_t *nport, uint16_t low,
- uint16_t high, struct pf_ksrc_node **sn)
+ uint16_t high, struct pf_ksrc_node **sn,
+ struct pf_udp_mapping **udp_mapping)
{
struct pf_state_key_cmp key;
struct pf_addr init_addr;
+ struct pf_srchash *sh = NULL;
bzero(&init_addr, sizeof(init_addr));
+
+ MPASS(*udp_mapping == NULL);
+
+ /*
+ * If we are UDP and have an existing mapping we can get source port
+ * from the mapping. In this case we have to look up the src_node as
+ * pf_map_addr would.
+ */
+ if (proto == IPPROTO_UDP && (r->rpool.opts & PF_POOL_ENDPI)) {
+ struct pf_udp_endpoint_cmp udp_source;
+
+ bzero(&udp_source, sizeof(udp_source));
+ udp_source.af = af;
+ PF_ACPY(&udp_source.addr, saddr, af);
+ udp_source.port = sport;
+ *udp_mapping = pf_udp_mapping_find(&udp_source);
+ if (*udp_mapping) {
+ PF_ACPY(naddr, &(*udp_mapping)->endpoints[1].addr, af);
+ *nport = (*udp_mapping)->endpoints[1].port;
+ /* Try to find a src_node as per pf_map_addr(). */
+ if (*sn == NULL && r->rpool.opts & PF_POOL_STICKYADDR &&
+ (r->rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_NONE)
+ *sn = pf_find_src_node(saddr, r, af, &sh, 0);
+ return (0);
+ } else {
+ *udp_mapping = pf_udp_mapping_create(af, saddr, sport, &init_addr, 0);
+ if (*udp_mapping == NULL)
+ return (1);
+ }
+ }
+
if (pf_map_addr(af, r, saddr, naddr, NULL, &init_addr, sn))
- return (1);
+ goto failed;
if (proto == IPPROTO_ICMP) {
if (*nport == htons(ICMP_ECHO)) {
@@ -250,6 +284,8 @@ pf_get_sport(sa_family_t af, u_int8_t proto, struct pf_krule *r,
do {
PF_ACPY(&key.addr[1], naddr, key.af);
+ if (*udp_mapping)
+ PF_ACPY(&(*udp_mapping)->endpoints[1].addr, naddr, af);
/*
* port search; start random, step;
@@ -277,8 +313,16 @@ pf_get_sport(sa_family_t af, u_int8_t proto, struct pf_krule *r,
} else if (low == high) {
key.port[1] = htons(low);
if (!pf_find_state_all_exists(&key, PF_IN)) {
- *nport = htons(low);
- return (0);
+ if (*udp_mapping != NULL) {
+ (*udp_mapping)->endpoints[1].port = htons(low);
+ if (pf_udp_mapping_insert(*udp_mapping) == 0) {
+ *nport = htons(low);
+ return (0);
+ }
+ } else {
+ *nport = htons(low);
+ return (0);
+ }
}
} else {
uint32_t tmp;
@@ -293,18 +337,35 @@ pf_get_sport(sa_family_t af, u_int8_t proto, struct pf_krule *r,
cut = arc4random() % (1 + high - low) + low;
/* low <= cut <= high */
for (tmp = cut; tmp <= high && tmp <= 0xffff; ++tmp) {
- key.port[1] = htons(tmp);
- if (!pf_find_state_all_exists(&key, PF_IN)) {
- *nport = htons(tmp);
- return (0);
+ if (*udp_mapping != NULL) {
+ (*udp_mapping)->endpoints[1].port = htons(tmp);
+ if (pf_udp_mapping_insert(*udp_mapping) == 0) {
+ *nport = htons(tmp);
+ return (0);
+ }
+ } else {
+ key.port[1] = htons(tmp);
+ if (!pf_find_state_all_exists(&key, PF_IN)) {
+ *nport = htons(tmp);
+ return (0);
+ }
}
}
tmp = cut;
for (tmp -= 1; tmp >= low && tmp <= 0xffff; --tmp) {
- key.port[1] = htons(tmp);
- if (!pf_find_state_all_exists(&key, PF_IN)) {
- *nport = htons(tmp);
- return (0);
+ if (proto == IPPROTO_UDP &&
+ (r->rpool.opts & PF_POOL_ENDPI)) {
+ (*udp_mapping)->endpoints[1].port = htons(tmp);
+ if (pf_udp_mapping_insert(*udp_mapping) == 0) {
+ *nport = htons(tmp);
+ return (0);
+ }
+ } else {
+ key.port[1] = htons(tmp);
+ if (!pf_find_state_all_exists(&key, PF_IN)) {
+ *nport = htons(tmp);
+ return (0);
+ }
}
}
}
@@ -326,6 +387,10 @@ pf_get_sport(sa_family_t af, u_int8_t proto, struct pf_krule *r,
return (1);
}
} while (! PF_AEQ(&init_addr, naddr, af) );
+
+failed:
+ uma_zfree(V_pf_udp_mapping_z, *udp_mapping);
+ *udp_mapping = NULL;
return (1); /* none available */
}
@@ -333,7 +398,7 @@ static int
pf_get_mape_sport(sa_family_t af, u_int8_t proto, struct pf_krule *r,
struct pf_addr *saddr, uint16_t sport, struct pf_addr *daddr,
uint16_t dport, struct pf_addr *naddr, uint16_t *nport,
- struct pf_ksrc_node **sn)
+ struct pf_ksrc_node **sn, struct pf_udp_mapping **udp_mapping)
{
uint16_t psmask, low, highmask;
uint16_t i, ahigh, cut;
@@ -353,13 +418,13 @@ pf_get_mape_sport(sa_family_t af, u_int8_t proto, struct pf_krule *r,
for (i = cut; i <= ahigh; i++) {
low = (i << ashift) | psmask;
if (!pf_get_sport(af, proto, r, saddr, sport, daddr, dport,
- naddr, nport, low, low | highmask, sn))
+ naddr, nport, low, low | highmask, sn, udp_mapping))
return (0);
}
for (i = cut - 1; i > 0; i--) {
low = (i << ashift) | psmask;
if (!pf_get_sport(af, proto, r, saddr, sport, daddr, dport,
- naddr, nport, low, low | highmask, sn))
+ naddr, nport, low, low | highmask, sn, udp_mapping))
return (0);
}
return (1);
@@ -597,7 +662,8 @@ pf_get_translation(struct pf_pdesc *pd, struct mbuf *m, int off,
struct pf_state_key **skp, struct pf_state_key **nkp,
struct pf_addr *saddr, struct pf_addr *daddr,
uint16_t sport, uint16_t dport, struct pf_kanchor_stackframe *anchor_stack,
- struct pf_krule **rp)
+ struct pf_krule **rp,
+ struct pf_udp_mapping **udp_mapping)
{
struct pf_krule *r = NULL;
struct pf_addr *naddr;
@@ -637,7 +703,7 @@ pf_get_translation(struct pf_pdesc *pd, struct mbuf *m, int off,
return (PFRES_MAX);
}
- *skp = pf_state_key_setup(pd, saddr, daddr, sport, dport);
+ *skp = pf_state_key_setup(pd, m, off, saddr, daddr, sport, dport);
if (*skp == NULL)
return (PFRES_MEMORY);
*nkp = pf_state_key_clone(*skp);
@@ -661,7 +727,7 @@ pf_get_translation(struct pf_pdesc *pd, struct mbuf *m, int off,
}
if (r->rpool.mape.offset > 0) {
if (pf_get_mape_sport(pd->af, pd->proto, r, saddr,
- sport, daddr, dport, naddr, nportp, sn)) {
+ sport, daddr, dport, naddr, nportp, sn, udp_mapping)) {
DPFPRINTF(PF_DEBUG_MISC,
("pf: MAP-E port allocation (%u/%u/%u)"
" failed\n",
@@ -672,7 +738,7 @@ pf_get_translation(struct pf_pdesc *pd, struct mbuf *m, int off,
goto notrans;
}
} else if (pf_get_sport(pd->af, pd->proto, r, saddr, sport,
- daddr, dport, naddr, nportp, low, high, sn)) {
+ daddr, dport, naddr, nportp, low, high, sn, udp_mapping)) {
DPFPRINTF(PF_DEBUG_MISC,
("pf: NAT proxy port allocation (%u-%u) failed\n",
r->rpool.proxy_port[0], r->rpool.proxy_port[1]));
diff --git a/sys/netpfil/pf/pf_syncookies.c b/sys/netpfil/pf/pf_syncookies.c
index c5ee64c6aed0..538ab1dfd94c 100644
--- a/sys/netpfil/pf/pf_syncookies.c
+++ b/sys/netpfil/pf/pf_syncookies.c
@@ -201,9 +201,6 @@ pf_set_syncookies(struct pfioc_nv *nv)
return (ENOMEM);
nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
- if (nvlpacked == NULL)
- return (ENOMEM);
-
error = copyin(nv->data, nvlpacked, nv->len);
if (error)
ERROUT(error);
diff --git a/sys/netsmb/smb_rq.c b/sys/netsmb/smb_rq.c
index 7b9b8fd3dc00..b70d464da550 100644
--- a/sys/netsmb/smb_rq.c
+++ b/sys/netsmb/smb_rq.c
@@ -64,8 +64,6 @@ smb_rq_alloc(struct smb_connobj *layer, u_char cmd, struct smb_cred *scred,
int error;
rqp = malloc(sizeof(*rqp), M_SMBRQ, M_WAITOK);
- if (rqp == NULL)
- return ENOMEM;
error = smb_rq_init(rqp, layer, cmd, scred);
rqp->sr_flags |= SMBR_ALLOCED;
if (error) {
@@ -376,8 +374,6 @@ smb_t2_alloc(struct smb_connobj *layer, u_short setup, struct smb_cred *scred,
int error;
t2p = malloc(sizeof(*t2p), M_SMBRQ, M_WAITOK);
- if (t2p == NULL)
- return ENOMEM;
error = smb_t2_init(t2p, layer, setup, scred);
t2p->t2_flags |= SMBT2_ALLOCED;
if (error) {
diff --git a/sys/netsmb/smb_subr.c b/sys/netsmb/smb_subr.c
index bc2e138f3652..87c2de594e54 100644
--- a/sys/netsmb/smb_subr.c
+++ b/sys/netsmb/smb_subr.c
@@ -149,8 +149,6 @@ smb_memdup(const void *umem, int len)
if (len > 8 * 1024)
return NULL;
p = malloc(len, M_SMBSTR, M_WAITOK);
- if (p == NULL)
- return NULL;
bcopy(umem, p, len);
return p;
}
diff --git a/sys/riscv/include/riscvreg.h b/sys/riscv/include/riscvreg.h
index 65c5a9532f7b..ec8294557504 100644
--- a/sys/riscv/include/riscvreg.h
+++ b/sys/riscv/include/riscvreg.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2015-2017 Ruslan Bukin
+ * Copyright (c) 2015-2024 Ruslan Bukin
* All rights reserved.
*
* Portions of this software were developed by SRI International and the
@@ -213,13 +213,14 @@
(__builtin_constant_p(val) && ((u_long)(val) < 32))
#define csr_swap(csr, val) \
-({ if (CSR_ZIMM(val)) \
+({ u_long ret; \
+ if (CSR_ZIMM(val)) \
__asm __volatile("csrrwi %0, " #csr ", %1" \
- : "=r" (val) : "i" (val)); \
+ : "=r" (ret) : "i" (val)); \
else \
__asm __volatile("csrrw %0, " #csr ", %1" \
- : "=r" (val) : "r" (val)); \
- val; \
+ : "=r" (ret) : "r" (val)); \
+ ret; \
})
#define csr_write(csr, val) \
diff --git a/sys/sys/bitset.h b/sys/sys/bitset.h
index 2b26e8bcdbf9..d7e0b4cd7e41 100644
--- a/sys/sys/bitset.h
+++ b/sys/sys/bitset.h
@@ -232,9 +232,8 @@
} while (0)
/*
- * 'start' and 'end' are 0-based bit (runtime) indices. Note that, as for ffs(),
- * the returned index is 1-based, 0 being reserved to indicate that no bits are
- * set.
+ * 'start' is a 0-based bit index. By contrast, and as for ffs(), the returned
+ * index is 1-based, 0 being reserved to indicate that no bits are set.
*/
#define __BIT_FFS_AT(_s, p, start) __extension__ ({ \
__size_t __i; \
diff --git a/sys/sys/disk/gpt.h b/sys/sys/disk/gpt.h
index 596a5cba1681..426ae835c0c1 100644
--- a/sys/sys/disk/gpt.h
+++ b/sys/sys/disk/gpt.h
@@ -259,6 +259,9 @@ CTASSERT(sizeof(struct gpt_ent) == 128);
#define GPT_ENT_TYPE_HIFIVE_BBL \
{0x2e54b353,0x1271,0x4842,0x80,0x6f,{0xe4,0x36,0xd6,0xaf,0x69,0x85}}
+#define GPT_ENT_TYPE_U_BOOT_ENV \
+ {0x3de21764,0x95bd,0x54bd,0xa5,0xc3,{0x4a,0xbe,0x78,0x6f,0x38,0xa8}}
+
/*
* Boot partition used by GRUB 2.
*/
diff --git a/sys/tests/framework/kern_testfrwk.c b/sys/tests/framework/kern_testfrwk.c
index 19691f1febfc..949300290b9d 100644
--- a/sys/tests/framework/kern_testfrwk.c
+++ b/sys/tests/framework/kern_testfrwk.c
@@ -192,10 +192,6 @@ kerntest_execute(SYSCTL_HANDLER_ARGS)
}
/* Grab some memory */
kte = malloc(sizeof(struct kern_test_entry), M_KTFRWK, M_WAITOK);
- if (kte == NULL) {
- error = ENOMEM;
- goto out;
- }
KTFRWK_LOCK();
TAILQ_FOREACH(li, &kfrwk.kfrwk_testlist, next) {
if (strcmp(li->name, kt.name) == 0) {
@@ -244,10 +240,6 @@ kern_testframework_register(const char *name, kerntfunc func)
return (E2BIG);
}
te = malloc(sizeof(struct kern_test_list), M_KTFRWK, M_WAITOK);
- if (te == NULL) {
- error = ENOMEM;
- goto out;
- }
KTFRWK_LOCK();
/* First does it already exist? */
TAILQ_FOREACH(li, &kfrwk.kfrwk_testlist, next) {
diff --git a/sys/tools/sdiodevs2h.awk b/sys/tools/sdiodevs2h.awk
index 596d09531ab1..99735dc09a62 100644
--- a/sys/tools/sdiodevs2h.awk
+++ b/sys/tools/sdiodevs2h.awk
@@ -149,6 +149,18 @@ function product(hfile)
printf("\n") > hfile
}
+function palias(hfile)
+{
+ nproducts++
+
+ products[nproducts, 1] = $2; # vendor name
+ products[nproducts, 2] = $3; # product id
+ products[nproducts, 3] = $4; # id
+ if (hfile)
+ printf("#define\tSDIO_DEVICE_ID_%s\tSDIO_DEVICE_ID_%s\n", \
+ $2, $3) > hfile
+}
+
function dump_dfile(dfile)
{
printf("\n") > dfile
@@ -243,6 +255,10 @@ while ((getline < srcfile) > 0) {
product(hfile)
continue
}
+ if ($1 == "palias") {
+ palias(hfile)
+ continue
+ }
if ($0 == "")
blanklines++
if (hfile)
diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
index 59ab7d13c55d..cf1ed5818b2f 100644
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -903,26 +903,6 @@ vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
return (i);
}
-/*
- * Allocate a contiguous, power of two-sized set of physical pages
- * from the free lists.
- *
- * The free page queues must be locked.
- */
-vm_page_t
-vm_phys_alloc_pages(int domain, int pool, int order)
-{
- vm_page_t m;
- int freelist;
-
- for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
- m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
- if (m != NULL)
- return (m);
- }
- return (NULL);
-}
-
/*
* Allocate a contiguous, power of two-sized set of physical pages from the
* specified free list. The free list must be specified using one of the
@@ -930,7 +910,7 @@ vm_phys_alloc_pages(int domain, int pool, int order)
*
* The free page queues must be locked.
*/
-vm_page_t
+static vm_page_t
vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
{
struct vm_freelist *alt, *fl;
@@ -987,6 +967,26 @@ vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
return (NULL);
}
+/*
+ * Allocate a contiguous, power of two-sized set of physical pages
+ * from the free lists.
+ *
+ * The free page queues must be locked.
+ */
+vm_page_t
+vm_phys_alloc_pages(int domain, int pool, int order)
+{
+ vm_page_t m;
+ int freelist;
+
+ for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
+ m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
+ if (m != NULL)
+ return (m);
+ }
+ return (NULL);
+}
+
/*
* Find the vm_page corresponding to the given physical address, which must lie
* within the given physical memory segment.
diff --git a/sys/vm/vm_phys.h b/sys/vm/vm_phys.h
index bd086fd5571f..43d94a9420f2 100644
--- a/sys/vm/vm_phys.h
+++ b/sys/vm/vm_phys.h
@@ -61,8 +61,6 @@ extern int *mem_locality;
void vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end);
vm_page_t vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low,
vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
-vm_page_t vm_phys_alloc_freelist_pages(int domain, int freelist, int pool,
- int order);
int vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[]);
vm_page_t vm_phys_alloc_pages(int domain, int pool, int order);
int vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high);
diff --git a/sys/x86/iommu/intel_ctx.c b/sys/x86/iommu/intel_ctx.c
index baee0109e5a9..5047acd283e9 100644
--- a/sys/x86/iommu/intel_ctx.c
+++ b/sys/x86/iommu/intel_ctx.c
@@ -75,6 +75,9 @@ static void dmar_unref_domain_locked(struct dmar_unit *dmar,
struct dmar_domain *domain);
static void dmar_domain_destroy(struct dmar_domain *domain);
+static void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx);
+static void dmar_free_ctx(struct dmar_ctx *ctx);
+
static void
dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus)
{
@@ -121,25 +124,6 @@ dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp)
return (ctxp);
}
-static void
-device_tag_init(struct dmar_ctx *ctx, device_t dev)
-{
- struct dmar_domain *domain;
- bus_addr_t maxaddr;
-
- domain = CTX2DOM(ctx);
- maxaddr = MIN(domain->iodom.end, BUS_SPACE_MAXADDR);
- ctx->context.tag->common.impl = &bus_dma_iommu_impl;
- ctx->context.tag->common.boundary = 0;
- ctx->context.tag->common.lowaddr = maxaddr;
- ctx->context.tag->common.highaddr = maxaddr;
- ctx->context.tag->common.maxsize = maxaddr;
- ctx->context.tag->common.nsegments = BUS_SPACE_UNRESTRICTED;
- ctx->context.tag->common.maxsegsz = maxaddr;
- ctx->context.tag->ctx = CTX2IOCTX(ctx);
- ctx->context.tag->owner = dev;
-}
-
static void
ctx_id_entry_init_one(dmar_ctx_entry_t *ctxp, struct dmar_domain *domain,
vm_page_t ctx_root)
@@ -420,7 +404,7 @@ dmar_domain_alloc(struct dmar_unit *dmar, bool id_mapped)
}
domain->iodom.flags |= IOMMU_DOMAIN_IDMAP;
} else {
- error = domain_alloc_pgtbl(domain);
+ error = dmar_domain_alloc_pgtbl(domain);
if (error != 0)
goto fail;
/* Disable local apic region access */
@@ -506,7 +490,7 @@ dmar_domain_destroy(struct dmar_domain *domain)
if ((domain->iodom.flags & IOMMU_DOMAIN_PGTBL_INITED) != 0) {
if (domain->pgtbl_obj != NULL)
DMAR_DOMAIN_PGLOCK(domain);
- domain_free_pgtbl(domain);
+ dmar_domain_free_pgtbl(domain);
}
iommu_domain_fini(iodom);
dmar = DOM2DMAR(domain);
@@ -583,7 +567,7 @@ dmar_get_ctx_for_dev1(struct dmar_unit *dmar, device_t dev, uint16_t rid,
ctx = ctx1;
dmar_ctx_link(ctx);
ctx->context.tag->owner = dev;
- device_tag_init(ctx, dev);
+ iommu_device_tag_init(CTX2IOCTX(ctx), dev);
/*
* This is the first activated context for the
@@ -745,7 +729,7 @@ dmar_unref_domain_locked(struct dmar_unit *dmar, struct dmar_domain *domain)
dmar_domain_destroy(domain);
}
-void
+static void
dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
{
struct sf_buf *sf;
@@ -819,7 +803,7 @@ dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
TD_PINNED_ASSERT;
}
-void
+static void
dmar_free_ctx(struct dmar_ctx *ctx)
{
struct dmar_unit *dmar;
@@ -849,25 +833,12 @@ dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid)
return (NULL);
}
-void
-dmar_domain_free_entry(struct iommu_map_entry *entry, bool free)
-{
- if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
- iommu_gas_free_region(entry);
- else
- iommu_gas_free_space(entry);
- if (free)
- iommu_gas_free_entry(entry);
- else
- entry->flags = 0;
-}
-
/*
* If the given value for "free" is true, then the caller must not be using
* the entry's dmamap_link field.
*/
void
-iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
+dmar_domain_unload_entry(struct iommu_map_entry *entry, bool free,
bool cansleep)
{
struct dmar_domain *domain;
@@ -884,17 +855,18 @@ iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
if (unit->qi_enabled) {
if (free) {
DMAR_LOCK(unit);
- dmar_qi_invalidate_locked(domain, entry, true);
+ iommu_qi_invalidate_locked(&domain->iodom, entry,
+ true);
DMAR_UNLOCK(unit);
} else {
- dmar_qi_invalidate_sync(domain, entry->start,
+ iommu_qi_invalidate_sync(&domain->iodom, entry->start,
entry->end - entry->start, cansleep);
- dmar_domain_free_entry(entry, false);
+ iommu_domain_free_entry(entry, false);
}
} else {
domain_flush_iotlb_sync(domain, entry->start, entry->end -
entry->start);
- dmar_domain_free_entry(entry, free);
+ iommu_domain_free_entry(entry, free);
}
}
@@ -905,11 +877,11 @@ dmar_domain_unload_emit_wait(struct dmar_domain *domain,
if (TAILQ_NEXT(entry, dmamap_link) == NULL)
return (true);
- return (domain->batch_no++ % dmar_batch_coalesce == 0);
+ return (domain->batch_no++ % iommu_qi_batch_coalesce == 0);
}
void
-iommu_domain_unload(struct iommu_domain *iodom,
+dmar_domain_unload(struct iommu_domain *iodom,
struct iommu_map_entries_tailq *entries, bool cansleep)
{
struct dmar_domain *domain;
@@ -930,7 +902,7 @@ iommu_domain_unload(struct iommu_domain *iodom,
domain_flush_iotlb_sync(domain, entry->start,
entry->end - entry->start);
TAILQ_REMOVE(entries, entry, dmamap_link);
- dmar_domain_free_entry(entry, true);
+ iommu_domain_free_entry(entry, true);
}
}
if (TAILQ_EMPTY(entries))
@@ -940,44 +912,41 @@ iommu_domain_unload(struct iommu_domain *iodom,
DMAR_LOCK(unit);
while ((entry = TAILQ_FIRST(entries)) != NULL) {
TAILQ_REMOVE(entries, entry, dmamap_link);
- dmar_qi_invalidate_locked(domain, entry,
+ iommu_qi_invalidate_locked(&domain->iodom, entry,
dmar_domain_unload_emit_wait(domain, entry));
}
DMAR_UNLOCK(unit);
}
struct iommu_ctx *
-iommu_get_ctx(struct iommu_unit *iommu, device_t dev, uint16_t rid,
+dmar_get_ctx(struct iommu_unit *iommu, device_t dev, uint16_t rid,
bool id_mapped, bool rmrr_init)
{
struct dmar_unit *dmar;
struct dmar_ctx *ret;
dmar = IOMMU2DMAR(iommu);
-
ret = dmar_get_ctx_for_dev(dmar, dev, rid, id_mapped, rmrr_init);
-
return (CTX2IOCTX(ret));
}
void
-iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *context)
+dmar_free_ctx_locked_method(struct iommu_unit *iommu,
+ struct iommu_ctx *context)
{
struct dmar_unit *dmar;
struct dmar_ctx *ctx;
dmar = IOMMU2DMAR(iommu);
ctx = IOCTX2CTX(context);
-
dmar_free_ctx_locked(dmar, ctx);
}
void
-iommu_free_ctx(struct iommu_ctx *context)
+dmar_free_ctx_method(struct iommu_ctx *context)
{
struct dmar_ctx *ctx;
ctx = IOCTX2CTX(context);
-
dmar_free_ctx(ctx);
}
diff --git a/sys/x86/iommu/intel_dmar.h b/sys/x86/iommu/intel_dmar.h
index b7f0300e16f0..188e40dec36c 100644
--- a/sys/x86/iommu/intel_dmar.h
+++ b/sys/x86/iommu/intel_dmar.h
@@ -87,15 +87,15 @@ struct dmar_ctx {
#define DMAR_DOMAIN_UNLOCK(dom) mtx_unlock(&(dom)->iodom.lock)
#define DMAR_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->iodom.lock, MA_OWNED)
-#define DMAR2IOMMU(dmar) &((dmar)->iommu)
+#define DMAR2IOMMU(dmar) (&((dmar)->iommu))
#define IOMMU2DMAR(dmar) \
__containerof((dmar), struct dmar_unit, iommu)
-#define DOM2IODOM(domain) &((domain)->iodom)
+#define DOM2IODOM(domain) (&((domain)->iodom))
#define IODOM2DOM(domain) \
__containerof((domain), struct dmar_domain, iodom)
-#define CTX2IOCTX(ctx) &((ctx)->context)
+#define CTX2IOCTX(ctx) (&((ctx)->context))
#define IOCTX2CTX(ctx) \
__containerof((ctx), struct dmar_ctx, context)
@@ -103,26 +103,13 @@ struct dmar_ctx {
#define CTX2DMAR(ctx) (CTX2DOM(ctx)->dmar)
#define DOM2DMAR(domain) ((domain)->dmar)
-struct dmar_msi_data {
- int irq;
- int irq_rid;
- struct resource *irq_res;
- void *intr_handle;
- int (*handler)(void *);
- int msi_data_reg;
- int msi_addr_reg;
- int msi_uaddr_reg;
- void (*enable_intr)(struct dmar_unit *);
- void (*disable_intr)(struct dmar_unit *);
- const char *name;
-};
-
#define DMAR_INTR_FAULT 0
#define DMAR_INTR_QI 1
#define DMAR_INTR_TOTAL 2
struct dmar_unit {
struct iommu_unit iommu;
+ struct x86_unit_common x86c;
uint16_t segment;
uint64_t base;
@@ -130,8 +117,6 @@ struct dmar_unit {
int reg_rid;
struct resource *regs;
- struct dmar_msi_data intrs[DMAR_INTR_TOTAL];
-
/* Hardware registers cache */
uint32_t hw_ver;
uint64_t hw_cap;
@@ -155,17 +140,6 @@ struct dmar_unit {
/* QI */
int qi_enabled;
- char *inv_queue;
- vm_size_t inv_queue_size;
- uint32_t inv_queue_avail;
- uint32_t inv_queue_tail;
- volatile uint32_t inv_waitd_seq_hw; /* hw writes there on wait
- descr completion */
- uint64_t inv_waitd_seq_hw_phys;
- uint32_t inv_waitd_seq; /* next sequence number to use for wait descr */
- u_int inv_waitd_gen; /* seq number generation AKA seq overflows */
- u_int inv_seq_waiters; /* count of waiters for seq */
- u_int inv_queue_full; /* informational counter */
/* IR */
int ir_enabled;
@@ -173,41 +147,11 @@ struct dmar_unit {
dmar_irte_t *irt;
u_int irte_cnt;
vmem_t *irtids;
-
- /*
- * Delayed freeing of map entries queue processing:
- *
- * tlb_flush_head and tlb_flush_tail are used to implement a FIFO
- * queue that supports concurrent dequeues and enqueues. However,
- * there can only be a single dequeuer (accessing tlb_flush_head) and
- * a single enqueuer (accessing tlb_flush_tail) at a time. Since the
- * unit's qi_task is the only dequeuer, it can access tlb_flush_head
- * without any locking. In contrast, there may be multiple enqueuers,
- * so the enqueuers acquire the iommu unit lock to serialize their
- * accesses to tlb_flush_tail.
- *
- * In this FIFO queue implementation, the key to enabling concurrent
- * dequeues and enqueues is that the dequeuer never needs to access
- * tlb_flush_tail and the enqueuer never needs to access
- * tlb_flush_head. In particular, tlb_flush_head and tlb_flush_tail
- * are never NULL, so neither a dequeuer nor an enqueuer ever needs to
- * update both. Instead, tlb_flush_head always points to a "zombie"
- * struct, which previously held the last dequeued item. Thus, the
- * zombie's next field actually points to the struct holding the first
- * item in the queue. When an item is dequeued, the current zombie is
- * finally freed, and the struct that held the just dequeued item
- * becomes the new zombie. When the queue is empty, tlb_flush_tail
- * also points to the zombie.
- */
- struct iommu_map_entry *tlb_flush_head;
- struct iommu_map_entry *tlb_flush_tail;
- struct task qi_task;
- struct taskqueue *qi_taskqueue;
};
-#define DMAR_LOCK(dmar) mtx_lock(&(dmar)->iommu.lock)
-#define DMAR_UNLOCK(dmar) mtx_unlock(&(dmar)->iommu.lock)
-#define DMAR_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->iommu.lock, MA_OWNED)
+#define DMAR_LOCK(dmar) mtx_lock(&DMAR2IOMMU(dmar)->lock)
+#define DMAR_UNLOCK(dmar) mtx_unlock(&DMAR2IOMMU(dmar)->lock)
+#define DMAR_ASSERT_LOCKED(dmar) mtx_assert(&DMAR2IOMMU(dmar)->lock, MA_OWNED)
#define DMAR_FAULT_LOCK(dmar) mtx_lock_spin(&(dmar)->fault_lock)
#define DMAR_FAULT_UNLOCK(dmar) mtx_unlock_spin(&(dmar)->fault_lock)
@@ -222,6 +166,8 @@ struct dmar_unit {
#define DMAR_BARRIER_RMRR 0
#define DMAR_BARRIER_USEQ 1
+SYSCTL_DECL(_hw_iommu_dmar);
+
struct dmar_unit *dmar_find(device_t dev, bool verbose);
struct dmar_unit *dmar_find_hpet(device_t dev, uint16_t *rid);
struct dmar_unit *dmar_find_ioapic(u_int apic_id, uint16_t *rid);
@@ -231,9 +177,7 @@ bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl);
int domain_set_agaw(struct dmar_domain *domain, int mgaw);
int dmar_maxaddr2mgaw(struct dmar_unit *unit, iommu_gaddr_t maxaddr,
bool allow_less);
-vm_pindex_t pglvl_max_pages(int pglvl);
int domain_is_sp_lvl(struct dmar_domain *domain, int lvl);
-iommu_gaddr_t pglvl_page_size(int total_pglvl, int lvl);
iommu_gaddr_t domain_page_size(struct dmar_domain *domain, int lvl);
int calc_am(struct dmar_unit *unit, iommu_gaddr_t base, iommu_gaddr_t size,
iommu_gaddr_t *isizep);
@@ -256,14 +200,14 @@ uint64_t dmar_get_timeout(void);
void dmar_update_timeout(uint64_t newval);
int dmar_fault_intr(void *arg);
-void dmar_enable_fault_intr(struct dmar_unit *unit);
-void dmar_disable_fault_intr(struct dmar_unit *unit);
+void dmar_enable_fault_intr(struct iommu_unit *unit);
+void dmar_disable_fault_intr(struct iommu_unit *unit);
int dmar_init_fault_log(struct dmar_unit *unit);
void dmar_fini_fault_log(struct dmar_unit *unit);
int dmar_qi_intr(void *arg);
-void dmar_enable_qi_intr(struct dmar_unit *unit);
-void dmar_disable_qi_intr(struct dmar_unit *unit);
+void dmar_enable_qi_intr(struct iommu_unit *unit);
+void dmar_disable_qi_intr(struct iommu_unit *unit);
int dmar_init_qi(struct dmar_unit *unit);
void dmar_fini_qi(struct dmar_unit *unit);
void dmar_qi_invalidate_locked(struct dmar_domain *domain,
@@ -280,8 +224,8 @@ vm_object_t domain_get_idmap_pgtbl(struct dmar_domain *domain,
void put_idmap_pgtbl(vm_object_t obj);
void domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
iommu_gaddr_t size);
-int domain_alloc_pgtbl(struct dmar_domain *domain);
-void domain_free_pgtbl(struct dmar_domain *domain);
+int dmar_domain_alloc_pgtbl(struct dmar_domain *domain);
+void dmar_domain_free_pgtbl(struct dmar_domain *domain);
extern const struct iommu_domain_map_ops dmar_domain_map_ops;
int dmar_dev_depth(device_t child);
@@ -293,10 +237,16 @@ struct dmar_ctx *dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid,
int dev_domain, int dev_busno, const void *dev_path, int dev_path_len,
bool id_mapped, bool rmrr_init);
int dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx);
-void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx);
-void dmar_free_ctx(struct dmar_ctx *ctx);
+void dmar_free_ctx_locked_method(struct iommu_unit *dmar,
+ struct iommu_ctx *ctx);
+void dmar_free_ctx_method(struct iommu_ctx *ctx);
struct dmar_ctx *dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid);
-void dmar_domain_free_entry(struct iommu_map_entry *entry, bool free);
+struct iommu_ctx *dmar_get_ctx(struct iommu_unit *iommu, device_t dev,
+ uint16_t rid, bool id_mapped, bool rmrr_init);
+void dmar_domain_unload_entry(struct iommu_map_entry *entry, bool free,
+ bool cansleep);
+void dmar_domain_unload(struct iommu_domain *iodom,
+ struct iommu_map_entries_tailq *entries, bool cansleep);
void dmar_dev_parse_rmrr(struct dmar_domain *domain, int dev_domain,
int dev_busno, const void *dev_path, int dev_path_len,
@@ -308,9 +258,15 @@ void dmar_quirks_pre_use(struct iommu_unit *dmar);
int dmar_init_irt(struct dmar_unit *unit);
void dmar_fini_irt(struct dmar_unit *unit);
+int dmar_alloc_msi_intr(device_t src, u_int *cookies, u_int count);
+int dmar_map_msi_intr(device_t src, u_int cpu, u_int vector, u_int cookie,
+ uint64_t *addr, uint32_t *data);
+int dmar_unmap_msi_intr(device_t src, u_int cookie);
+int dmar_map_ioapic_intr(u_int ioapic_id, u_int cpu, u_int vector, bool edge,
+ bool activehi, int irq, u_int *cookie, uint32_t *hi, uint32_t *lo);
+int dmar_unmap_ioapic_intr(u_int ioapic_id, u_int *cookie);
extern int haw;
-extern int dmar_batch_coalesce;
extern int dmar_rmrr_enable;
static inline uint32_t
diff --git a/sys/x86/iommu/intel_drv.c b/sys/x86/iommu/intel_drv.c
index 636534173715..05fb49538add 100644
--- a/sys/x86/iommu/intel_drv.c
+++ b/sys/x86/iommu/intel_drv.c
@@ -64,6 +64,8 @@
#include
#include
#include
+#include
+#include
#include
#include
#include
@@ -228,22 +230,6 @@ dmar_probe(device_t dev)
return (BUS_PROBE_NOWILDCARD);
}
-static void
-dmar_release_intr(device_t dev, struct dmar_unit *unit, int idx)
-{
- struct dmar_msi_data *dmd;
-
- dmd = &unit->intrs[idx];
- if (dmd->irq == -1)
- return;
- bus_teardown_intr(dev, dmd->irq_res, dmd->intr_handle);
- bus_release_resource(dev, SYS_RES_IRQ, dmd->irq_rid, dmd->irq_res);
- bus_delete_resource(dev, SYS_RES_IRQ, dmd->irq_rid);
- PCIB_RELEASE_MSIX(device_get_parent(device_get_parent(dev)),
- dev, dmd->irq);
- dmd->irq = -1;
-}
-
static void
dmar_release_resources(device_t dev, struct dmar_unit *unit)
{
@@ -254,7 +240,7 @@ dmar_release_resources(device_t dev, struct dmar_unit *unit)
dmar_fini_qi(unit);
dmar_fini_fault_log(unit);
for (i = 0; i < DMAR_INTR_TOTAL; i++)
- dmar_release_intr(dev, unit, i);
+ iommu_release_intr(DMAR2IOMMU(unit), i);
if (unit->regs != NULL) {
bus_deactivate_resource(dev, SYS_RES_MEMORY, unit->reg_rid,
unit->regs);
@@ -272,84 +258,19 @@ dmar_release_resources(device_t dev, struct dmar_unit *unit)
}
}
-static int
-dmar_alloc_irq(device_t dev, struct dmar_unit *unit, int idx)
-{
- device_t pcib;
- struct dmar_msi_data *dmd;
- uint64_t msi_addr;
- uint32_t msi_data;
- int error;
-
- dmd = &unit->intrs[idx];
- pcib = device_get_parent(device_get_parent(dev)); /* Really not pcib */
- error = PCIB_ALLOC_MSIX(pcib, dev, &dmd->irq);
- if (error != 0) {
- device_printf(dev, "cannot allocate %s interrupt, %d\n",
- dmd->name, error);
- goto err1;
- }
- error = bus_set_resource(dev, SYS_RES_IRQ, dmd->irq_rid,
- dmd->irq, 1);
- if (error != 0) {
- device_printf(dev, "cannot set %s interrupt resource, %d\n",
- dmd->name, error);
- goto err2;
- }
- dmd->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
- &dmd->irq_rid, RF_ACTIVE);
- if (dmd->irq_res == NULL) {
- device_printf(dev,
- "cannot allocate resource for %s interrupt\n", dmd->name);
- error = ENXIO;
- goto err3;
- }
- error = bus_setup_intr(dev, dmd->irq_res, INTR_TYPE_MISC,
- dmd->handler, NULL, unit, &dmd->intr_handle);
- if (error != 0) {
- device_printf(dev, "cannot setup %s interrupt, %d\n",
- dmd->name, error);
- goto err4;
- }
- bus_describe_intr(dev, dmd->irq_res, dmd->intr_handle, "%s", dmd->name);
- error = PCIB_MAP_MSI(pcib, dev, dmd->irq, &msi_addr, &msi_data);
- if (error != 0) {
- device_printf(dev, "cannot map %s interrupt, %d\n",
- dmd->name, error);
- goto err5;
- }
- dmar_write4(unit, dmd->msi_data_reg, msi_data);
- dmar_write4(unit, dmd->msi_addr_reg, msi_addr);
- /* Only for xAPIC mode */
- dmar_write4(unit, dmd->msi_uaddr_reg, msi_addr >> 32);
- return (0);
-
-err5:
- bus_teardown_intr(dev, dmd->irq_res, dmd->intr_handle);
-err4:
- bus_release_resource(dev, SYS_RES_IRQ, dmd->irq_rid, dmd->irq_res);
-err3:
- bus_delete_resource(dev, SYS_RES_IRQ, dmd->irq_rid);
-err2:
- PCIB_RELEASE_MSIX(pcib, dev, dmd->irq);
- dmd->irq = -1;
-err1:
- return (error);
-}
-
#ifdef DEV_APIC
static int
dmar_remap_intr(device_t dev, device_t child, u_int irq)
{
struct dmar_unit *unit;
- struct dmar_msi_data *dmd;
+ struct iommu_msi_data *dmd;
uint64_t msi_addr;
uint32_t msi_data;
int i, error;
unit = device_get_softc(dev);
for (i = 0; i < DMAR_INTR_TOTAL; i++) {
- dmd = &unit->intrs[i];
+ dmd = &unit->x86c.intrs[i];
if (irq == dmd->irq) {
error = PCIB_MAP_MSI(device_get_parent(
device_get_parent(dev)),
@@ -357,11 +278,14 @@ dmar_remap_intr(device_t dev, device_t child, u_int irq)
if (error != 0)
return (error);
DMAR_LOCK(unit);
- (dmd->disable_intr)(unit);
- dmar_write4(unit, dmd->msi_data_reg, msi_data);
- dmar_write4(unit, dmd->msi_addr_reg, msi_addr);
- dmar_write4(unit, dmd->msi_uaddr_reg, msi_addr >> 32);
- (dmd->enable_intr)(unit);
+ dmd->msi_data = msi_data;
+ dmd->msi_addr = msi_addr;
+ (dmd->disable_intr)(DMAR2IOMMU(unit));
+ dmar_write4(unit, dmd->msi_data_reg, dmd->msi_data);
+ dmar_write4(unit, dmd->msi_addr_reg, dmd->msi_addr);
+ dmar_write4(unit, dmd->msi_uaddr_reg,
+ dmd->msi_addr >> 32);
+ (dmd->enable_intr)(DMAR2IOMMU(unit));
DMAR_UNLOCK(unit);
return (0);
}
@@ -405,6 +329,7 @@ dmar_attach(device_t dev)
{
struct dmar_unit *unit;
ACPI_DMAR_HARDWARE_UNIT *dmaru;
+ struct iommu_msi_data *dmd;
uint64_t timeout;
int disable_pmr;
int i, error;
@@ -437,37 +362,47 @@ dmar_attach(device_t dev)
dmar_update_timeout(timeout);
for (i = 0; i < DMAR_INTR_TOTAL; i++)
- unit->intrs[i].irq = -1;
-
- unit->intrs[DMAR_INTR_FAULT].name = "fault";
- unit->intrs[DMAR_INTR_FAULT].irq_rid = DMAR_FAULT_IRQ_RID;
- unit->intrs[DMAR_INTR_FAULT].handler = dmar_fault_intr;
- unit->intrs[DMAR_INTR_FAULT].msi_data_reg = DMAR_FEDATA_REG;
- unit->intrs[DMAR_INTR_FAULT].msi_addr_reg = DMAR_FEADDR_REG;
- unit->intrs[DMAR_INTR_FAULT].msi_uaddr_reg = DMAR_FEUADDR_REG;
- unit->intrs[DMAR_INTR_FAULT].enable_intr = dmar_enable_fault_intr;
- unit->intrs[DMAR_INTR_FAULT].disable_intr = dmar_disable_fault_intr;
- error = dmar_alloc_irq(dev, unit, DMAR_INTR_FAULT);
+ unit->x86c.intrs[i].irq = -1;
+
+ dmd = &unit->x86c.intrs[DMAR_INTR_FAULT];
+ dmd->name = "fault";
+ dmd->irq_rid = DMAR_FAULT_IRQ_RID;
+ dmd->handler = dmar_fault_intr;
+ dmd->msi_data_reg = DMAR_FEDATA_REG;
+ dmd->msi_addr_reg = DMAR_FEADDR_REG;
+ dmd->msi_uaddr_reg = DMAR_FEUADDR_REG;
+ dmd->enable_intr = dmar_enable_fault_intr;
+ dmd->disable_intr = dmar_disable_fault_intr;
+ error = iommu_alloc_irq(DMAR2IOMMU(unit), DMAR_INTR_FAULT);
if (error != 0) {
dmar_release_resources(dev, unit);
dmar_devs[unit->iommu.unit] = NULL;
return (error);
}
+ dmar_write4(unit, dmd->msi_data_reg, dmd->msi_data);
+ dmar_write4(unit, dmd->msi_addr_reg, dmd->msi_addr);
+ dmar_write4(unit, dmd->msi_uaddr_reg, dmd->msi_addr >> 32);
+
if (DMAR_HAS_QI(unit)) {
- unit->intrs[DMAR_INTR_QI].name = "qi";
- unit->intrs[DMAR_INTR_QI].irq_rid = DMAR_QI_IRQ_RID;
- unit->intrs[DMAR_INTR_QI].handler = dmar_qi_intr;
- unit->intrs[DMAR_INTR_QI].msi_data_reg = DMAR_IEDATA_REG;
- unit->intrs[DMAR_INTR_QI].msi_addr_reg = DMAR_IEADDR_REG;
- unit->intrs[DMAR_INTR_QI].msi_uaddr_reg = DMAR_IEUADDR_REG;
- unit->intrs[DMAR_INTR_QI].enable_intr = dmar_enable_qi_intr;
- unit->intrs[DMAR_INTR_QI].disable_intr = dmar_disable_qi_intr;
- error = dmar_alloc_irq(dev, unit, DMAR_INTR_QI);
+ dmd = &unit->x86c.intrs[DMAR_INTR_QI];
+ dmd->name = "qi";
+ dmd->irq_rid = DMAR_QI_IRQ_RID;
+ dmd->handler = dmar_qi_intr;
+ dmd->msi_data_reg = DMAR_IEDATA_REG;
+ dmd->msi_addr_reg = DMAR_IEADDR_REG;
+ dmd->msi_uaddr_reg = DMAR_IEUADDR_REG;
+ dmd->enable_intr = dmar_enable_qi_intr;
+ dmd->disable_intr = dmar_disable_qi_intr;
+ error = iommu_alloc_irq(DMAR2IOMMU(unit), DMAR_INTR_QI);
if (error != 0) {
dmar_release_resources(dev, unit);
dmar_devs[unit->iommu.unit] = NULL;
return (error);
}
+
+ dmar_write4(unit, dmd->msi_data_reg, dmd->msi_data);
+ dmar_write4(unit, dmd->msi_addr_reg, dmd->msi_addr);
+ dmar_write4(unit, dmd->msi_uaddr_reg, dmd->msi_addr >> 32);
}
mtx_init(&unit->iommu.lock, "dmarhw", NULL, MTX_DEF);
@@ -1300,20 +1235,20 @@ dmar_print_one(int idx, bool show_domains, bool show_mappings)
db_printf("qi is enabled: queue @0x%jx (IQA 0x%jx) "
"size 0x%jx\n"
" head 0x%x tail 0x%x avail 0x%x status 0x%x ctrl 0x%x\n"
- " hw compl 0x%x@%p/phys@%jx next seq 0x%x gen 0x%x\n",
- (uintmax_t)unit->inv_queue,
+ " hw compl 0x%jx@%p/phys@%jx next seq 0x%x gen 0x%x\n",
+ (uintmax_t)unit->x86c.inv_queue,
(uintmax_t)dmar_read8(unit, DMAR_IQA_REG),
- (uintmax_t)unit->inv_queue_size,
+ (uintmax_t)unit->x86c.inv_queue_size,
dmar_read4(unit, DMAR_IQH_REG),
dmar_read4(unit, DMAR_IQT_REG),
- unit->inv_queue_avail,
+ unit->x86c.inv_queue_avail,
dmar_read4(unit, DMAR_ICS_REG),
dmar_read4(unit, DMAR_IECTL_REG),
- unit->inv_waitd_seq_hw,
- &unit->inv_waitd_seq_hw,
- (uintmax_t)unit->inv_waitd_seq_hw_phys,
- unit->inv_waitd_seq,
- unit->inv_waitd_gen);
+ (uintmax_t)unit->x86c.inv_waitd_seq_hw,
+ &unit->x86c.inv_waitd_seq_hw,
+ (uintmax_t)unit->x86c.inv_waitd_seq_hw_phys,
+ unit->x86c.inv_waitd_seq,
+ unit->x86c.inv_waitd_gen);
} else {
db_printf("qi is disabled\n");
}
@@ -1357,12 +1292,52 @@ DB_SHOW_ALL_COMMAND(dmars, db_show_all_dmars)
}
#endif
-struct iommu_unit *
-iommu_find(device_t dev, bool verbose)
+static struct iommu_unit *
+dmar_find_method(device_t dev, bool verbose)
{
struct dmar_unit *dmar;
dmar = dmar_find(dev, verbose);
-
return (&dmar->iommu);
}
+
+static struct x86_unit_common *
+dmar_get_x86_common(struct iommu_unit *unit)
+{
+ struct dmar_unit *dmar;
+
+ dmar = IOMMU2DMAR(unit);
+ return (&dmar->x86c);
+}
+
+static void
+dmar_unit_pre_instantiate_ctx(struct iommu_unit *unit)
+{
+ dmar_quirks_pre_use(unit);
+ dmar_instantiate_rmrr_ctxs(unit);
+}
+
+static struct x86_iommu dmar_x86_iommu = {
+ .get_x86_common = dmar_get_x86_common,
+ .unit_pre_instantiate_ctx = dmar_unit_pre_instantiate_ctx,
+ .domain_unload_entry = dmar_domain_unload_entry,
+ .domain_unload = dmar_domain_unload,
+ .get_ctx = dmar_get_ctx,
+ .free_ctx_locked = dmar_free_ctx_locked_method,
+ .free_ctx = dmar_free_ctx_method,
+ .find = dmar_find_method,
+ .alloc_msi_intr = dmar_alloc_msi_intr,
+ .map_msi_intr = dmar_map_msi_intr,
+ .unmap_msi_intr = dmar_unmap_msi_intr,
+ .map_ioapic_intr = dmar_map_ioapic_intr,
+ .unmap_ioapic_intr = dmar_unmap_ioapic_intr,
+};
+
+static void
+x86_iommu_set_intel(void *arg __unused)
+{
+ if (cpu_vendor_id == CPU_VENDOR_INTEL)
+ set_x86_iommu(&dmar_x86_iommu);
+}
+
+SYSINIT(x86_iommu, SI_SUB_TUNABLES, SI_ORDER_ANY, x86_iommu_set_intel, NULL);
diff --git a/sys/x86/iommu/intel_fault.c b/sys/x86/iommu/intel_fault.c
index 59b482720cf1..1064165ea5d7 100644
--- a/sys/x86/iommu/intel_fault.c
+++ b/sys/x86/iommu/intel_fault.c
@@ -127,7 +127,7 @@ dmar_fault_intr(void *arg)
int fri, frir, faultp;
bool enqueue;
- unit = arg;
+ unit = IOMMU2DMAR((struct iommu_unit *)arg);
enqueue = false;
fsts = dmar_read4(unit, DMAR_FSTS_REG);
dmar_fault_intr_clear(unit, fsts);
@@ -276,9 +276,9 @@ dmar_init_fault_log(struct dmar_unit *unit)
"dmar%d fault taskq", unit->iommu.unit);
DMAR_LOCK(unit);
- dmar_disable_fault_intr(unit);
+ dmar_disable_fault_intr(&unit->iommu);
dmar_clear_faults(unit);
- dmar_enable_fault_intr(unit);
+ dmar_enable_fault_intr(&unit->iommu);
DMAR_UNLOCK(unit);
return (0);
@@ -292,7 +292,7 @@ dmar_fini_fault_log(struct dmar_unit *unit)
return;
DMAR_LOCK(unit);
- dmar_disable_fault_intr(unit);
+ dmar_disable_fault_intr(&unit->iommu);
DMAR_UNLOCK(unit);
taskqueue_drain(unit->fault_taskqueue, &unit->fault_task);
@@ -306,10 +306,12 @@ dmar_fini_fault_log(struct dmar_unit *unit)
}
void
-dmar_enable_fault_intr(struct dmar_unit *unit)
+dmar_enable_fault_intr(struct iommu_unit *iommu)
{
+ struct dmar_unit *unit;
uint32_t fectl;
+ unit = IOMMU2DMAR(iommu);
DMAR_ASSERT_LOCKED(unit);
fectl = dmar_read4(unit, DMAR_FECTL_REG);
fectl &= ~DMAR_FECTL_IM;
@@ -317,10 +319,12 @@ dmar_enable_fault_intr(struct dmar_unit *unit)
}
void
-dmar_disable_fault_intr(struct dmar_unit *unit)
+dmar_disable_fault_intr(struct iommu_unit *iommu)
{
+ struct dmar_unit *unit;
uint32_t fectl;
+ unit = IOMMU2DMAR(iommu);
DMAR_ASSERT_LOCKED(unit);
fectl = dmar_read4(unit, DMAR_FECTL_REG);
dmar_write4(unit, DMAR_FECTL_REG, fectl | DMAR_FECTL_IM);
diff --git a/sys/x86/iommu/intel_idpgtbl.c b/sys/x86/iommu/intel_idpgtbl.c
index de38a6fece94..fbc0e9e97b64 100644
--- a/sys/x86/iommu/intel_idpgtbl.c
+++ b/sys/x86/iommu/intel_idpgtbl.c
@@ -316,40 +316,6 @@ put_idmap_pgtbl(vm_object_t obj)
* address. Support superpages.
*/
-/*
- * Index of the pte for the guest address base in the page table at
- * the level lvl.
- */
-static int
-domain_pgtbl_pte_off(struct dmar_domain *domain, iommu_gaddr_t base, int lvl)
-{
-
- base >>= IOMMU_PAGE_SHIFT + (domain->pglvl - lvl - 1) *
- IOMMU_NPTEPGSHIFT;
- return (base & IOMMU_PTEMASK);
-}
-
-/*
- * Returns the page index of the page table page in the page table
- * object, which maps the given address base at the page table level
- * lvl.
- */
-static vm_pindex_t
-domain_pgtbl_get_pindex(struct dmar_domain *domain, iommu_gaddr_t base, int lvl)
-{
- vm_pindex_t idx, pidx;
- int i;
-
- KASSERT(lvl >= 0 && lvl < domain->pglvl,
- ("wrong lvl %p %d", domain, lvl));
-
- for (pidx = idx = 0, i = 0; i < lvl; i++, pidx = idx) {
- idx = domain_pgtbl_pte_off(domain, base, i) +
- pidx * IOMMU_NPTEPG + 1;
- }
- return (idx);
-}
-
static iommu_pte_t *
domain_pgtbl_map_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
int flags, vm_pindex_t *idxp, struct sf_buf **sf)
@@ -362,7 +328,7 @@ domain_pgtbl_map_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
KASSERT((flags & IOMMU_PGF_OBJL) != 0, ("lost PGF_OBJL"));
- idx = domain_pgtbl_get_pindex(domain, base, lvl);
+ idx = pglvl_pgtbl_get_pindex(domain->pglvl, base, lvl);
if (*sf != NULL && idx == *idxp) {
pte = (iommu_pte_t *)sf_buf_kva(*sf);
} else {
@@ -414,7 +380,7 @@ domain_pgtbl_map_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
goto retry;
}
}
- pte += domain_pgtbl_pte_off(domain, base, lvl);
+ pte += pglvl_pgtbl_pte_off(domain->pglvl, base, lvl);
return (pte);
}
@@ -512,7 +478,7 @@ domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
domain = IODOM2DOM(iodom);
unit = domain->dmar;
- KASSERT((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) == 0,
+ KASSERT((iodom->flags & IOMMU_DOMAIN_IDMAP) == 0,
("modifying idmap pagetable domain %p", domain));
KASSERT((base & IOMMU_PAGE_MASK) == 0,
("non-aligned base %p %jx %jx", domain, (uintmax_t)base,
@@ -696,7 +662,7 @@ domain_unmap_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
}
int
-domain_alloc_pgtbl(struct dmar_domain *domain)
+dmar_domain_alloc_pgtbl(struct dmar_domain *domain)
{
vm_page_t m;
@@ -718,7 +684,7 @@ domain_alloc_pgtbl(struct dmar_domain *domain)
}
void
-domain_free_pgtbl(struct dmar_domain *domain)
+dmar_domain_free_pgtbl(struct dmar_domain *domain)
{
vm_object_t obj;
vm_page_t m;
diff --git a/sys/x86/iommu/intel_intrmap.c b/sys/x86/iommu/intel_intrmap.c
index 560dc9c1d91c..f05ad08de37a 100644
--- a/sys/x86/iommu/intel_intrmap.c
+++ b/sys/x86/iommu/intel_intrmap.c
@@ -65,7 +65,7 @@ static void dmar_ir_program_irte(struct dmar_unit *unit, u_int idx,
static int dmar_ir_free_irte(struct dmar_unit *unit, u_int cookie);
int
-iommu_alloc_msi_intr(device_t src, u_int *cookies, u_int count)
+dmar_alloc_msi_intr(device_t src, u_int *cookies, u_int count)
{
struct dmar_unit *unit;
vmem_addr_t vmem_res;
@@ -93,7 +93,7 @@ iommu_alloc_msi_intr(device_t src, u_int *cookies, u_int count)
}
int
-iommu_map_msi_intr(device_t src, u_int cpu, u_int vector, u_int cookie,
+dmar_map_msi_intr(device_t src, u_int cpu, u_int vector, u_int cookie,
uint64_t *addr, uint32_t *data)
{
struct dmar_unit *unit;
@@ -139,7 +139,7 @@ iommu_map_msi_intr(device_t src, u_int cpu, u_int vector, u_int cookie,
}
int
-iommu_unmap_msi_intr(device_t src, u_int cookie)
+dmar_unmap_msi_intr(device_t src, u_int cookie)
{
struct dmar_unit *unit;
@@ -150,7 +150,7 @@ iommu_unmap_msi_intr(device_t src, u_int cookie)
}
int
-iommu_map_ioapic_intr(u_int ioapic_id, u_int cpu, u_int vector, bool edge,
+dmar_map_ioapic_intr(u_int ioapic_id, u_int cpu, u_int vector, bool edge,
bool activehi, int irq, u_int *cookie, uint32_t *hi, uint32_t *lo)
{
struct dmar_unit *unit;
@@ -213,7 +213,7 @@ iommu_map_ioapic_intr(u_int ioapic_id, u_int cpu, u_int vector, bool edge,
}
int
-iommu_unmap_ioapic_intr(u_int ioapic_id, u_int *cookie)
+dmar_unmap_ioapic_intr(u_int ioapic_id, u_int *cookie)
{
struct dmar_unit *unit;
u_int idx;
@@ -323,6 +323,7 @@ dmar_init_irt(struct dmar_unit *unit)
return (0);
unit->ir_enabled = 1;
TUNABLE_INT_FETCH("hw.dmar.ir", &unit->ir_enabled);
+ TUNABLE_INT_FETCH("hw.iommu.ir", &unit->ir_enabled);
if (!unit->ir_enabled)
return (0);
if (!unit->qi_enabled) {
diff --git a/sys/x86/iommu/intel_qi.c b/sys/x86/iommu/intel_qi.c
index 590cbac9bcbd..c11946ad9447 100644
--- a/sys/x86/iommu/intel_qi.c
+++ b/sys/x86/iommu/intel_qi.c
@@ -58,17 +58,6 @@
#include
#include
-static bool
-dmar_qi_seq_processed(const struct dmar_unit *unit,
- const struct iommu_qi_genseq *pseq)
-{
- u_int gen;
-
- gen = unit->inv_waitd_gen;
- return (pseq->gen < gen ||
- (pseq->gen == gen && pseq->seq <= unit->inv_waitd_seq_hw));
-}
-
static int
dmar_enable_qi(struct dmar_unit *unit)
{
@@ -96,32 +85,36 @@ dmar_disable_qi(struct dmar_unit *unit)
}
static void
-dmar_qi_advance_tail(struct dmar_unit *unit)
+dmar_qi_advance_tail(struct iommu_unit *iommu)
{
+ struct dmar_unit *unit;
+ unit = IOMMU2DMAR(iommu);
DMAR_ASSERT_LOCKED(unit);
- dmar_write4(unit, DMAR_IQT_REG, unit->inv_queue_tail);
+ dmar_write4(unit, DMAR_IQT_REG, unit->x86c.inv_queue_tail);
}
static void
-dmar_qi_ensure(struct dmar_unit *unit, int descr_count)
+dmar_qi_ensure(struct iommu_unit *iommu, int descr_count)
{
+ struct dmar_unit *unit;
uint32_t head;
int bytes;
+ unit = IOMMU2DMAR(iommu);
DMAR_ASSERT_LOCKED(unit);
bytes = descr_count << DMAR_IQ_DESCR_SZ_SHIFT;
for (;;) {
- if (bytes <= unit->inv_queue_avail)
+ if (bytes <= unit->x86c.inv_queue_avail)
break;
/* refill */
head = dmar_read4(unit, DMAR_IQH_REG);
head &= DMAR_IQH_MASK;
- unit->inv_queue_avail = head - unit->inv_queue_tail -
+ unit->x86c.inv_queue_avail = head - unit->x86c.inv_queue_tail -
DMAR_IQ_DESCR_SZ;
- if (head <= unit->inv_queue_tail)
- unit->inv_queue_avail += unit->inv_queue_size;
- if (bytes <= unit->inv_queue_avail)
+ if (head <= unit->x86c.inv_queue_tail)
+ unit->x86c.inv_queue_avail += unit->x86c.inv_queue_size;
+ if (bytes <= unit->x86c.inv_queue_avail)
break;
/*
@@ -134,11 +127,11 @@ dmar_qi_ensure(struct dmar_unit *unit, int descr_count)
* See dmar_qi_invalidate_locked() for a discussion
* about data race prevention.
*/
- dmar_qi_advance_tail(unit);
- unit->inv_queue_full++;
+ dmar_qi_advance_tail(DMAR2IOMMU(unit));
+ unit->x86c.inv_queue_full++;
cpu_spinwait();
}
- unit->inv_queue_avail -= bytes;
+ unit->x86c.inv_queue_avail -= bytes;
}
static void
@@ -146,208 +139,106 @@ dmar_qi_emit(struct dmar_unit *unit, uint64_t data1, uint64_t data2)
{
DMAR_ASSERT_LOCKED(unit);
- *(volatile uint64_t *)(unit->inv_queue + unit->inv_queue_tail) = data1;
- unit->inv_queue_tail += DMAR_IQ_DESCR_SZ / 2;
- KASSERT(unit->inv_queue_tail <= unit->inv_queue_size,
- ("tail overflow 0x%x 0x%jx", unit->inv_queue_tail,
- (uintmax_t)unit->inv_queue_size));
- unit->inv_queue_tail &= unit->inv_queue_size - 1;
- *(volatile uint64_t *)(unit->inv_queue + unit->inv_queue_tail) = data2;
- unit->inv_queue_tail += DMAR_IQ_DESCR_SZ / 2;
- KASSERT(unit->inv_queue_tail <= unit->inv_queue_size,
- ("tail overflow 0x%x 0x%jx", unit->inv_queue_tail,
- (uintmax_t)unit->inv_queue_size));
- unit->inv_queue_tail &= unit->inv_queue_size - 1;
+#ifdef __LP64__
+ atomic_store_64((uint64_t *)(unit->x86c.inv_queue +
+ unit->x86c.inv_queue_tail), data1);
+#else
+ *(volatile uint64_t *)(unit->x86c.inv_queue +
+ unit->x86c.inv_queue_tail) = data1;
+#endif
+ unit->x86c.inv_queue_tail += DMAR_IQ_DESCR_SZ / 2;
+ KASSERT(unit->x86c.inv_queue_tail <= unit->x86c.inv_queue_size,
+ ("tail overflow 0x%x 0x%jx", unit->x86c.inv_queue_tail,
+ (uintmax_t)unit->x86c.inv_queue_size));
+ unit->x86c.inv_queue_tail &= unit->x86c.inv_queue_size - 1;
+#ifdef __LP64__
+ atomic_store_64((uint64_t *)(unit->x86c.inv_queue +
+ unit->x86c.inv_queue_tail), data2);
+#else
+ *(volatile uint64_t *)(unit->x86c.inv_queue +
+ unit->x86c.inv_queue_tail) = data2;
+#endif
+ unit->x86c.inv_queue_tail += DMAR_IQ_DESCR_SZ / 2;
+ KASSERT(unit->x86c.inv_queue_tail <= unit->x86c.inv_queue_size,
+ ("tail overflow 0x%x 0x%jx", unit->x86c.inv_queue_tail,
+ (uintmax_t)unit->x86c.inv_queue_size));
+ unit->x86c.inv_queue_tail &= unit->x86c.inv_queue_size - 1;
}
static void
-dmar_qi_emit_wait_descr(struct dmar_unit *unit, uint32_t seq, bool intr,
+dmar_qi_emit_wait_descr(struct iommu_unit *iommu, uint32_t seq, bool intr,
bool memw, bool fence)
{
+ struct dmar_unit *unit;
+ unit = IOMMU2DMAR(iommu);
DMAR_ASSERT_LOCKED(unit);
dmar_qi_emit(unit, DMAR_IQ_DESCR_WAIT_ID |
(intr ? DMAR_IQ_DESCR_WAIT_IF : 0) |
(memw ? DMAR_IQ_DESCR_WAIT_SW : 0) |
(fence ? DMAR_IQ_DESCR_WAIT_FN : 0) |
(memw ? DMAR_IQ_DESCR_WAIT_SD(seq) : 0),
- memw ? unit->inv_waitd_seq_hw_phys : 0);
+ memw ? unit->x86c.inv_waitd_seq_hw_phys : 0);
}
static void
-dmar_qi_emit_wait_seq(struct dmar_unit *unit, struct iommu_qi_genseq *pseq,
- bool emit_wait)
-{
- struct iommu_qi_genseq gsec;
- uint32_t seq;
-
- KASSERT(pseq != NULL, ("wait descriptor with no place for seq"));
- DMAR_ASSERT_LOCKED(unit);
- if (unit->inv_waitd_seq == 0xffffffff) {
- gsec.gen = unit->inv_waitd_gen;
- gsec.seq = unit->inv_waitd_seq;
- dmar_qi_ensure(unit, 1);
- dmar_qi_emit_wait_descr(unit, gsec.seq, false, true, false);
- dmar_qi_advance_tail(unit);
- while (!dmar_qi_seq_processed(unit, &gsec))
- cpu_spinwait();
- unit->inv_waitd_gen++;
- unit->inv_waitd_seq = 1;
- }
- seq = unit->inv_waitd_seq++;
- pseq->gen = unit->inv_waitd_gen;
- pseq->seq = seq;
- if (emit_wait) {
- dmar_qi_ensure(unit, 1);
- dmar_qi_emit_wait_descr(unit, seq, true, true, false);
- }
-}
-
-/*
- * To avoid missed wakeups, callers must increment the unit's waiters count
- * before advancing the tail past the wait descriptor.
- */
-static void
-dmar_qi_wait_for_seq(struct dmar_unit *unit, const struct iommu_qi_genseq *gseq,
- bool nowait)
-{
-
- DMAR_ASSERT_LOCKED(unit);
- KASSERT(unit->inv_seq_waiters > 0, ("%s: no waiters", __func__));
- while (!dmar_qi_seq_processed(unit, gseq)) {
- if (cold || nowait) {
- cpu_spinwait();
- } else {
- msleep(&unit->inv_seq_waiters, &unit->iommu.lock, 0,
- "dmarse", hz);
- }
- }
- unit->inv_seq_waiters--;
-}
-
-static void
-dmar_qi_invalidate_emit(struct dmar_domain *domain, iommu_gaddr_t base,
+dmar_qi_invalidate_emit(struct iommu_domain *idomain, iommu_gaddr_t base,
iommu_gaddr_t size, struct iommu_qi_genseq *pseq, bool emit_wait)
{
struct dmar_unit *unit;
+ struct dmar_domain *domain;
iommu_gaddr_t isize;
int am;
+ domain = __containerof(idomain, struct dmar_domain, iodom);
unit = domain->dmar;
DMAR_ASSERT_LOCKED(unit);
for (; size > 0; base += isize, size -= isize) {
am = calc_am(unit, base, size, &isize);
- dmar_qi_ensure(unit, 1);
+ dmar_qi_ensure(DMAR2IOMMU(unit), 1);
dmar_qi_emit(unit, DMAR_IQ_DESCR_IOTLB_INV |
DMAR_IQ_DESCR_IOTLB_PAGE | DMAR_IQ_DESCR_IOTLB_DW |
DMAR_IQ_DESCR_IOTLB_DR |
DMAR_IQ_DESCR_IOTLB_DID(domain->domain),
base | am);
}
- dmar_qi_emit_wait_seq(unit, pseq, emit_wait);
-}
-
-/*
- * The caller must not be using the entry's dmamap_link field.
- */
-void
-dmar_qi_invalidate_locked(struct dmar_domain *domain,
- struct iommu_map_entry *entry, bool emit_wait)
-{
- struct dmar_unit *unit;
-
- unit = domain->dmar;
- DMAR_ASSERT_LOCKED(unit);
- dmar_qi_invalidate_emit(domain, entry->start, entry->end -
- entry->start, &entry->gseq, emit_wait);
-
- /*
- * To avoid a data race in dmar_qi_task(), the entry's gseq must be
- * initialized before the entry is added to the TLB flush list, and the
- * entry must be added to that list before the tail is advanced. More
- * precisely, the tail must not be advanced past the wait descriptor
- * that will generate the interrupt that schedules dmar_qi_task() for
- * execution before the entry is added to the list. While an earlier
- * call to dmar_qi_ensure() might have advanced the tail, it will not
- * advance it past the wait descriptor.
- *
- * See the definition of struct dmar_unit for more information on
- * synchronization.
- */
- entry->tlb_flush_next = NULL;
- atomic_store_rel_ptr((uintptr_t *)&unit->tlb_flush_tail->tlb_flush_next,
- (uintptr_t)entry);
- unit->tlb_flush_tail = entry;
-
- dmar_qi_advance_tail(unit);
+ iommu_qi_emit_wait_seq(DMAR2IOMMU(unit), pseq, emit_wait);
}
-void
-dmar_qi_invalidate_sync(struct dmar_domain *domain, iommu_gaddr_t base,
- iommu_gaddr_t size, bool cansleep)
+static void
+dmar_qi_invalidate_glob_impl(struct dmar_unit *unit, uint64_t data1)
{
- struct dmar_unit *unit;
struct iommu_qi_genseq gseq;
- unit = domain->dmar;
- DMAR_LOCK(unit);
- dmar_qi_invalidate_emit(domain, base, size, &gseq, true);
-
- /*
- * To avoid a missed wakeup in dmar_qi_task(), the unit's waiters count
- * must be incremented before the tail is advanced.
- */
- unit->inv_seq_waiters++;
-
- dmar_qi_advance_tail(unit);
- dmar_qi_wait_for_seq(unit, &gseq, !cansleep);
- DMAR_UNLOCK(unit);
+ DMAR_ASSERT_LOCKED(unit);
+ dmar_qi_ensure(DMAR2IOMMU(unit), 2);
+ dmar_qi_emit(unit, data1, 0);
+ iommu_qi_emit_wait_seq(DMAR2IOMMU(unit), &gseq, true);
+ /* See dmar_qi_invalidate_sync(). */
+ unit->x86c.inv_seq_waiters++;
+ dmar_qi_advance_tail(DMAR2IOMMU(unit));
+ iommu_qi_wait_for_seq(DMAR2IOMMU(unit), &gseq, false);
}
void
dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit)
{
- struct iommu_qi_genseq gseq;
-
- DMAR_ASSERT_LOCKED(unit);
- dmar_qi_ensure(unit, 2);
- dmar_qi_emit(unit, DMAR_IQ_DESCR_CTX_INV | DMAR_IQ_DESCR_CTX_GLOB, 0);
- dmar_qi_emit_wait_seq(unit, &gseq, true);
- /* See dmar_qi_invalidate_sync(). */
- unit->inv_seq_waiters++;
- dmar_qi_advance_tail(unit);
- dmar_qi_wait_for_seq(unit, &gseq, false);
+ dmar_qi_invalidate_glob_impl(unit, DMAR_IQ_DESCR_CTX_INV |
+ DMAR_IQ_DESCR_CTX_GLOB);
}
void
dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit)
{
- struct iommu_qi_genseq gseq;
-
- DMAR_ASSERT_LOCKED(unit);
- dmar_qi_ensure(unit, 2);
- dmar_qi_emit(unit, DMAR_IQ_DESCR_IOTLB_INV | DMAR_IQ_DESCR_IOTLB_GLOB |
- DMAR_IQ_DESCR_IOTLB_DW | DMAR_IQ_DESCR_IOTLB_DR, 0);
- dmar_qi_emit_wait_seq(unit, &gseq, true);
- /* See dmar_qi_invalidate_sync(). */
- unit->inv_seq_waiters++;
- dmar_qi_advance_tail(unit);
- dmar_qi_wait_for_seq(unit, &gseq, false);
+ dmar_qi_invalidate_glob_impl(unit, DMAR_IQ_DESCR_IOTLB_INV |
+ DMAR_IQ_DESCR_IOTLB_GLOB | DMAR_IQ_DESCR_IOTLB_DW |
+ DMAR_IQ_DESCR_IOTLB_DR);
}
void
dmar_qi_invalidate_iec_glob(struct dmar_unit *unit)
{
- struct iommu_qi_genseq gseq;
-
- DMAR_ASSERT_LOCKED(unit);
- dmar_qi_ensure(unit, 2);
- dmar_qi_emit(unit, DMAR_IQ_DESCR_IEC_INV, 0);
- dmar_qi_emit_wait_seq(unit, &gseq, true);
- /* See dmar_qi_invalidate_sync(). */
- unit->inv_seq_waiters++;
- dmar_qi_advance_tail(unit);
- dmar_qi_wait_for_seq(unit, &gseq, false);
+ dmar_qi_invalidate_glob_impl(unit, DMAR_IQ_DESCR_IEC_INV);
}
void
@@ -363,21 +254,21 @@ dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt)
for (; cnt > 0; cnt -= c, start += c) {
l = ffs(start | cnt) - 1;
c = 1 << l;
- dmar_qi_ensure(unit, 1);
+ dmar_qi_ensure(DMAR2IOMMU(unit), 1);
dmar_qi_emit(unit, DMAR_IQ_DESCR_IEC_INV |
DMAR_IQ_DESCR_IEC_IDX | DMAR_IQ_DESCR_IEC_IIDX(start) |
DMAR_IQ_DESCR_IEC_IM(l), 0);
}
- dmar_qi_ensure(unit, 1);
- dmar_qi_emit_wait_seq(unit, &gseq, true);
+ dmar_qi_ensure(DMAR2IOMMU(unit), 1);
+ iommu_qi_emit_wait_seq(DMAR2IOMMU(unit), &gseq, true);
/*
- * Since dmar_qi_wait_for_seq() will not sleep, this increment's
+ * Since iommu_qi_wait_for_seq() will not sleep, this increment's
* placement relative to advancing the tail doesn't matter.
*/
- unit->inv_seq_waiters++;
+ unit->x86c.inv_seq_waiters++;
- dmar_qi_advance_tail(unit);
+ dmar_qi_advance_tail(DMAR2IOMMU(unit));
/*
* The caller of the function, in particular,
@@ -394,7 +285,7 @@ dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt)
* queue is processed, which includes requests possibly issued
* before our request.
*/
- dmar_qi_wait_for_seq(unit, &gseq, true);
+ iommu_qi_wait_for_seq(DMAR2IOMMU(unit), &gseq, true);
}
int
@@ -402,41 +293,21 @@ dmar_qi_intr(void *arg)
{
struct dmar_unit *unit;
- unit = arg;
+ unit = IOMMU2DMAR((struct iommu_unit *)arg);
KASSERT(unit->qi_enabled, ("dmar%d: QI is not enabled",
unit->iommu.unit));
- taskqueue_enqueue(unit->qi_taskqueue, &unit->qi_task);
+ taskqueue_enqueue(unit->x86c.qi_taskqueue, &unit->x86c.qi_task);
return (FILTER_HANDLED);
}
-static void
-dmar_qi_drain_tlb_flush(struct dmar_unit *unit)
-{
- struct iommu_map_entry *entry, *head;
-
- for (head = unit->tlb_flush_head;; head = entry) {
- entry = (struct iommu_map_entry *)
- atomic_load_acq_ptr((uintptr_t *)&head->tlb_flush_next);
- if (entry == NULL ||
- !dmar_qi_seq_processed(unit, &entry->gseq))
- break;
- unit->tlb_flush_head = entry;
- iommu_gas_free_entry(head);
- if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
- iommu_gas_free_region(entry);
- else
- iommu_gas_free_space(entry);
- }
-}
-
static void
dmar_qi_task(void *arg, int pending __unused)
{
struct dmar_unit *unit;
uint32_t ics;
- unit = arg;
- dmar_qi_drain_tlb_flush(unit);
+ unit = IOMMU2DMAR(arg);
+ iommu_qi_drain_tlb_flush(DMAR2IOMMU(unit));
/*
* Request an interrupt on the completion of the next invalidation
@@ -453,16 +324,16 @@ dmar_qi_task(void *arg, int pending __unused)
* Otherwise, such entries will linger until a later entry
* that requests an interrupt is processed.
*/
- dmar_qi_drain_tlb_flush(unit);
+ iommu_qi_drain_tlb_flush(DMAR2IOMMU(unit));
}
- if (unit->inv_seq_waiters > 0) {
+ if (unit->x86c.inv_seq_waiters > 0) {
/*
* Acquire the DMAR lock so that wakeup() is called only after
* the waiter is sleeping.
*/
DMAR_LOCK(unit);
- wakeup(&unit->inv_seq_waiters);
+ wakeup(&unit->x86c.inv_seq_waiters);
DMAR_UNLOCK(unit);
}
}
@@ -472,7 +343,7 @@ dmar_init_qi(struct dmar_unit *unit)
{
uint64_t iqa;
uint32_t ics;
- int qi_sz;
+ u_int qi_sz;
if (!DMAR_HAS_QI(unit) || (unit->hw_cap & DMAR_CAP_CM) != 0)
return (0);
@@ -481,34 +352,19 @@ dmar_init_qi(struct dmar_unit *unit)
if (!unit->qi_enabled)
return (0);
- unit->tlb_flush_head = unit->tlb_flush_tail =
- iommu_gas_alloc_entry(NULL, 0);
- TASK_INIT(&unit->qi_task, 0, dmar_qi_task, unit);
- unit->qi_taskqueue = taskqueue_create_fast("dmarqf", M_WAITOK,
- taskqueue_thread_enqueue, &unit->qi_taskqueue);
- taskqueue_start_threads(&unit->qi_taskqueue, 1, PI_AV,
- "dmar%d qi taskq", unit->iommu.unit);
-
- unit->inv_waitd_gen = 0;
- unit->inv_waitd_seq = 1;
-
- qi_sz = DMAR_IQA_QS_DEF;
- TUNABLE_INT_FETCH("hw.dmar.qi_size", &qi_sz);
- if (qi_sz > DMAR_IQA_QS_MAX)
- qi_sz = DMAR_IQA_QS_MAX;
- unit->inv_queue_size = (1ULL << qi_sz) * PAGE_SIZE;
- /* Reserve one descriptor to prevent wraparound. */
- unit->inv_queue_avail = unit->inv_queue_size - DMAR_IQ_DESCR_SZ;
-
- /* The invalidation queue reads by DMARs are always coherent. */
- unit->inv_queue = kmem_alloc_contig(unit->inv_queue_size, M_WAITOK |
- M_ZERO, 0, iommu_high, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
- unit->inv_waitd_seq_hw_phys = pmap_kextract(
- (vm_offset_t)&unit->inv_waitd_seq_hw);
+ unit->x86c.qi_buf_maxsz = DMAR_IQA_QS_MAX;
+ unit->x86c.qi_cmd_sz = DMAR_IQ_DESCR_SZ;
+ iommu_qi_common_init(DMAR2IOMMU(unit), dmar_qi_task);
+ get_x86_iommu()->qi_ensure = dmar_qi_ensure;
+ get_x86_iommu()->qi_emit_wait_descr = dmar_qi_emit_wait_descr;
+ get_x86_iommu()->qi_advance_tail = dmar_qi_advance_tail;
+ get_x86_iommu()->qi_invalidate_emit = dmar_qi_invalidate_emit;
+
+ qi_sz = ilog2(unit->x86c.inv_queue_size / PAGE_SIZE);
DMAR_LOCK(unit);
dmar_write8(unit, DMAR_IQT_REG, 0);
- iqa = pmap_kextract((uintptr_t)unit->inv_queue);
+ iqa = pmap_kextract((uintptr_t)unit->x86c.inv_queue);
iqa |= qi_sz;
dmar_write8(unit, DMAR_IQA_REG, iqa);
dmar_enable_qi(unit);
@@ -517,49 +373,35 @@ dmar_init_qi(struct dmar_unit *unit)
ics = DMAR_ICS_IWC;
dmar_write4(unit, DMAR_ICS_REG, ics);
}
- dmar_enable_qi_intr(unit);
+ dmar_enable_qi_intr(DMAR2IOMMU(unit));
DMAR_UNLOCK(unit);
return (0);
}
+static void
+dmar_fini_qi_helper(struct iommu_unit *iommu)
+{
+ dmar_disable_qi_intr(iommu);
+ dmar_disable_qi(IOMMU2DMAR(iommu));
+}
+
void
dmar_fini_qi(struct dmar_unit *unit)
{
- struct iommu_qi_genseq gseq;
-
if (!unit->qi_enabled)
return;
- taskqueue_drain(unit->qi_taskqueue, &unit->qi_task);
- taskqueue_free(unit->qi_taskqueue);
- unit->qi_taskqueue = NULL;
-
- DMAR_LOCK(unit);
- /* quisce */
- dmar_qi_ensure(unit, 1);
- dmar_qi_emit_wait_seq(unit, &gseq, true);
- /* See dmar_qi_invalidate_sync_locked(). */
- unit->inv_seq_waiters++;
- dmar_qi_advance_tail(unit);
- dmar_qi_wait_for_seq(unit, &gseq, false);
- /* only after the quisce, disable queue */
- dmar_disable_qi_intr(unit);
- dmar_disable_qi(unit);
- KASSERT(unit->inv_seq_waiters == 0,
- ("dmar%d: waiters on disabled queue", unit->iommu.unit));
- DMAR_UNLOCK(unit);
-
- kmem_free(unit->inv_queue, unit->inv_queue_size);
- unit->inv_queue = NULL;
- unit->inv_queue_size = 0;
+ iommu_qi_common_fini(DMAR2IOMMU(unit), dmar_fini_qi_helper);
unit->qi_enabled = 0;
}
void
-dmar_enable_qi_intr(struct dmar_unit *unit)
+dmar_enable_qi_intr(struct iommu_unit *iommu)
{
+ struct dmar_unit *unit;
uint32_t iectl;
+ unit = IOMMU2DMAR(iommu);
DMAR_ASSERT_LOCKED(unit);
KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported",
unit->iommu.unit));
@@ -569,10 +411,12 @@ dmar_enable_qi_intr(struct dmar_unit *unit)
}
void
-dmar_disable_qi_intr(struct dmar_unit *unit)
+dmar_disable_qi_intr(struct iommu_unit *iommu)
{
+ struct dmar_unit *unit;
uint32_t iectl;
+ unit = IOMMU2DMAR(iommu);
DMAR_ASSERT_LOCKED(unit);
KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported",
unit->iommu.unit));
diff --git a/sys/x86/iommu/intel_utils.c b/sys/x86/iommu/intel_utils.c
index a96f65fddfc5..287b5fe9376a 100644
--- a/sys/x86/iommu/intel_utils.c
+++ b/sys/x86/iommu/intel_utils.c
@@ -172,23 +172,6 @@ dmar_maxaddr2mgaw(struct dmar_unit *unit, iommu_gaddr_t maxaddr, bool allow_less
return (-1);
}
-/*
- * Calculate the total amount of page table pages needed to map the
- * whole bus address space on the context with the selected agaw.
- */
-vm_pindex_t
-pglvl_max_pages(int pglvl)
-{
- vm_pindex_t res;
- int i;
-
- for (res = 0, i = pglvl; i > 0; i--) {
- res *= IOMMU_NPTEPG;
- res++;
- }
- return (res);
-}
-
/*
* Return true if the page table level lvl supports the superpage for
* the context ctx.
@@ -209,26 +192,6 @@ domain_is_sp_lvl(struct dmar_domain *domain, int lvl)
return (alvl < nitems(sagaw_sp) && (sagaw_sp[alvl] & cap_sps) != 0);
}
-iommu_gaddr_t
-pglvl_page_size(int total_pglvl, int lvl)
-{
- int rlvl;
- static const iommu_gaddr_t pg_sz[] = {
- (iommu_gaddr_t)IOMMU_PAGE_SIZE,
- (iommu_gaddr_t)IOMMU_PAGE_SIZE << IOMMU_NPTEPGSHIFT,
- (iommu_gaddr_t)IOMMU_PAGE_SIZE << (2 * IOMMU_NPTEPGSHIFT),
- (iommu_gaddr_t)IOMMU_PAGE_SIZE << (3 * IOMMU_NPTEPGSHIFT),
- (iommu_gaddr_t)IOMMU_PAGE_SIZE << (4 * IOMMU_NPTEPGSHIFT),
- (iommu_gaddr_t)IOMMU_PAGE_SIZE << (5 * IOMMU_NPTEPGSHIFT),
- };
-
- KASSERT(lvl >= 0 && lvl < total_pglvl,
- ("total %d lvl %d", total_pglvl, lvl));
- rlvl = total_pglvl - lvl - 1;
- KASSERT(rlvl < nitems(pg_sz), ("sizeof pg_sz lvl %d", lvl));
- return (pg_sz[rlvl]);
-}
-
iommu_gaddr_t
domain_page_size(struct dmar_domain *domain, int lvl)
{
@@ -544,7 +507,6 @@ dmar_barrier_exit(struct dmar_unit *dmar, u_int barrier_id)
DMAR_UNLOCK(dmar);
}
-int dmar_batch_coalesce = 100;
struct timespec dmar_hw_timeout = {
.tv_sec = 0,
.tv_nsec = 1000000
@@ -583,9 +545,6 @@ dmar_timeout_sysctl(SYSCTL_HANDLER_ARGS)
return (error);
}
-SYSCTL_INT(_hw_iommu_dmar, OID_AUTO, batch_coalesce, CTLFLAG_RWTUN,
- &dmar_batch_coalesce, 0,
- "Number of qi batches between interrupt");
SYSCTL_PROC(_hw_iommu_dmar, OID_AUTO, timeout,
CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
dmar_timeout_sysctl, "QU",
diff --git a/sys/x86/iommu/iommu_utils.c b/sys/x86/iommu/iommu_utils.c
index ffea1cc1a190..2011c632f770 100644
--- a/sys/x86/iommu/iommu_utils.c
+++ b/sys/x86/iommu/iommu_utils.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2013, 2014 The FreeBSD Foundation
+ * Copyright (c) 2013, 2014, 2024 The FreeBSD Foundation
*
* This software was developed by Konstantin Belousov
* under sponsorship from the FreeBSD Foundation.
@@ -28,21 +28,36 @@
* SUCH DAMAGE.
*/
+#include "opt_acpi.h"
+#if defined(__amd64__)
+#define DEV_APIC
+#else
+#include "opt_apic.h"
+#endif
+
#include
+#include
+#include
#include
+#include
#include
#include
#include
#include
#include
#include
+#include
#include
#include
#include
#include
-#include
+#include
+#include
+#include
#include
+#include
#include
+#include
#include
#include
#include
@@ -50,6 +65,13 @@
#include
#include
#include
+#include
+#ifdef DEV_APIC
+#include "pcib_if.h"
+#include
+#include
+#include
+#endif
vm_page_t
iommu_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags)
@@ -159,6 +181,571 @@ int iommu_tbl_pagecnt;
SYSCTL_NODE(_hw_iommu, OID_AUTO, dmar, CTLFLAG_RD | CTLFLAG_MPSAFE,
NULL, "");
-SYSCTL_INT(_hw_iommu_dmar, OID_AUTO, tbl_pagecnt, CTLFLAG_RD,
+SYSCTL_INT(_hw_iommu, OID_AUTO, tbl_pagecnt, CTLFLAG_RD,
&iommu_tbl_pagecnt, 0,
- "Count of pages used for DMAR pagetables");
+ "Count of pages used for IOMMU pagetables");
+
+int iommu_qi_batch_coalesce = 100;
+SYSCTL_INT(_hw_iommu, OID_AUTO, batch_coalesce, CTLFLAG_RWTUN,
+ &iommu_qi_batch_coalesce, 0,
+ "Number of qi batches between interrupt");
+
+static struct iommu_unit *
+x86_no_iommu_find(device_t dev, bool verbose)
+{
+ return (NULL);
+}
+
+static int
+x86_no_iommu_alloc_msi_intr(device_t src, u_int *cookies, u_int count)
+{
+ return (EOPNOTSUPP);
+}
+
+static int
+x86_no_iommu_map_msi_intr(device_t src, u_int cpu, u_int vector,
+ u_int cookie, uint64_t *addr, uint32_t *data)
+{
+ return (EOPNOTSUPP);
+}
+
+static int
+x86_no_iommu_unmap_msi_intr(device_t src, u_int cookie)
+{
+ return (0);
+}
+
+static int
+x86_no_iommu_map_ioapic_intr(u_int ioapic_id, u_int cpu, u_int vector,
+ bool edge, bool activehi, int irq, u_int *cookie, uint32_t *hi,
+ uint32_t *lo)
+{
+ return (EOPNOTSUPP);
+}
+
+static int
+x86_no_iommu_unmap_ioapic_intr(u_int ioapic_id, u_int *cookie)
+{
+ return (0);
+}
+
+static struct x86_iommu x86_no_iommu = {
+ .find = x86_no_iommu_find,
+ .alloc_msi_intr = x86_no_iommu_alloc_msi_intr,
+ .map_msi_intr = x86_no_iommu_map_msi_intr,
+ .unmap_msi_intr = x86_no_iommu_unmap_msi_intr,
+ .map_ioapic_intr = x86_no_iommu_map_ioapic_intr,
+ .unmap_ioapic_intr = x86_no_iommu_unmap_ioapic_intr,
+};
+
+static struct x86_iommu *x86_iommu = &x86_no_iommu;
+
+void
+set_x86_iommu(struct x86_iommu *x)
+{
+ MPASS(x86_iommu == &x86_no_iommu);
+ x86_iommu = x;
+}
+
+struct x86_iommu *
+get_x86_iommu(void)
+{
+ return (x86_iommu);
+}
+
+void
+iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
+ bool cansleep)
+{
+ x86_iommu->domain_unload_entry(entry, free, cansleep);
+}
+
+void
+iommu_domain_unload(struct iommu_domain *iodom,
+ struct iommu_map_entries_tailq *entries, bool cansleep)
+{
+ x86_iommu->domain_unload(iodom, entries, cansleep);
+}
+
+struct iommu_ctx *
+iommu_get_ctx(struct iommu_unit *iommu, device_t dev, uint16_t rid,
+ bool id_mapped, bool rmrr_init)
+{
+ return (x86_iommu->get_ctx(iommu, dev, rid, id_mapped, rmrr_init));
+}
+
+void
+iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *context)
+{
+ x86_iommu->free_ctx_locked(iommu, context);
+}
+
+void
+iommu_free_ctx(struct iommu_ctx *context)
+{
+ x86_iommu->free_ctx(context);
+}
+
+struct iommu_unit *
+iommu_find(device_t dev, bool verbose)
+{
+ return (x86_iommu->find(dev, verbose));
+}
+
+int
+iommu_alloc_msi_intr(device_t src, u_int *cookies, u_int count)
+{
+ return (x86_iommu->alloc_msi_intr(src, cookies, count));
+}
+
+int
+iommu_map_msi_intr(device_t src, u_int cpu, u_int vector, u_int cookie,
+ uint64_t *addr, uint32_t *data)
+{
+ return (x86_iommu->map_msi_intr(src, cpu, vector, cookie,
+ addr, data));
+}
+
+int
+iommu_unmap_msi_intr(device_t src, u_int cookie)
+{
+ return (x86_iommu->unmap_msi_intr(src, cookie));
+}
+
+int
+iommu_map_ioapic_intr(u_int ioapic_id, u_int cpu, u_int vector, bool edge,
+ bool activehi, int irq, u_int *cookie, uint32_t *hi, uint32_t *lo)
+{
+ return (x86_iommu->map_ioapic_intr(ioapic_id, cpu, vector, edge,
+ activehi, irq, cookie, hi, lo));
+}
+
+int
+iommu_unmap_ioapic_intr(u_int ioapic_id, u_int *cookie)
+{
+ return (x86_iommu->unmap_ioapic_intr(ioapic_id, cookie));
+}
+
+void
+iommu_unit_pre_instantiate_ctx(struct iommu_unit *unit)
+{
+ x86_iommu->unit_pre_instantiate_ctx(unit);
+}
+
+#define IOMMU2X86C(iommu) (x86_iommu->get_x86_common(iommu))
+
+static bool
+iommu_qi_seq_processed(struct iommu_unit *unit,
+ const struct iommu_qi_genseq *pseq)
+{
+ struct x86_unit_common *x86c;
+ u_int gen;
+
+ x86c = IOMMU2X86C(unit);
+ gen = x86c->inv_waitd_gen;
+ return (pseq->gen < gen ||
+ (pseq->gen == gen && pseq->seq <= x86c->inv_waitd_seq_hw));
+}
+
+void
+iommu_qi_emit_wait_seq(struct iommu_unit *unit, struct iommu_qi_genseq *pseq,
+ bool emit_wait)
+{
+ struct x86_unit_common *x86c;
+ struct iommu_qi_genseq gsec;
+ uint32_t seq;
+
+ KASSERT(pseq != NULL, ("wait descriptor with no place for seq"));
+ IOMMU_ASSERT_LOCKED(unit);
+ x86c = IOMMU2X86C(unit);
+
+ if (x86c->inv_waitd_seq == 0xffffffff) {
+ gsec.gen = x86c->inv_waitd_gen;
+ gsec.seq = x86c->inv_waitd_seq;
+ x86_iommu->qi_ensure(unit, 1);
+ x86_iommu->qi_emit_wait_descr(unit, gsec.seq, false,
+ true, false);
+ x86_iommu->qi_advance_tail(unit);
+ while (!iommu_qi_seq_processed(unit, &gsec))
+ cpu_spinwait();
+ x86c->inv_waitd_gen++;
+ x86c->inv_waitd_seq = 1;
+ }
+ seq = x86c->inv_waitd_seq++;
+ pseq->gen = x86c->inv_waitd_gen;
+ pseq->seq = seq;
+ if (emit_wait) {
+ x86_iommu->qi_ensure(unit, 1);
+ x86_iommu->qi_emit_wait_descr(unit, seq, true, true, false);
+ }
+}
+
+/*
+ * To avoid missed wakeups, callers must increment the unit's waiters count
+ * before advancing the tail past the wait descriptor.
+ */
+void
+iommu_qi_wait_for_seq(struct iommu_unit *unit, const struct iommu_qi_genseq *
+ gseq, bool nowait)
+{
+ struct x86_unit_common *x86c;
+
+ IOMMU_ASSERT_LOCKED(unit);
+ x86c = IOMMU2X86C(unit);
+
+ KASSERT(x86c->inv_seq_waiters > 0, ("%s: no waiters", __func__));
+ while (!iommu_qi_seq_processed(unit, gseq)) {
+ if (cold || nowait) {
+ cpu_spinwait();
+ } else {
+ msleep(&x86c->inv_seq_waiters, &unit->lock, 0,
+ "dmarse", hz);
+ }
+ }
+ x86c->inv_seq_waiters--;
+}
+
+/*
+ * The caller must not be using the entry's dmamap_link field.
+ */
+void
+iommu_qi_invalidate_locked(struct iommu_domain *domain,
+ struct iommu_map_entry *entry, bool emit_wait)
+{
+ struct iommu_unit *unit;
+ struct x86_unit_common *x86c;
+
+ unit = domain->iommu;
+ x86c = IOMMU2X86C(unit);
+ IOMMU_ASSERT_LOCKED(unit);
+
+ x86_iommu->qi_invalidate_emit(domain, entry->start, entry->end -
+ entry->start, &entry->gseq, emit_wait);
+
+ /*
+ * To avoid a data race in dmar_qi_task(), the entry's gseq must be
+ * initialized before the entry is added to the TLB flush list, and the
+ * entry must be added to that list before the tail is advanced. More
+ * precisely, the tail must not be advanced past the wait descriptor
+ * that will generate the interrupt that schedules dmar_qi_task() for
+ * execution before the entry is added to the list. While an earlier
+ * call to dmar_qi_ensure() might have advanced the tail, it will not
+ * advance it past the wait descriptor.
+ *
+ * See the definition of struct dmar_unit for more information on
+ * synchronization.
+ */
+ entry->tlb_flush_next = NULL;
+ atomic_store_rel_ptr((uintptr_t *)&x86c->tlb_flush_tail->
+ tlb_flush_next, (uintptr_t)entry);
+ x86c->tlb_flush_tail = entry;
+
+ x86_iommu->qi_advance_tail(unit);
+}
+
+void
+iommu_qi_invalidate_sync(struct iommu_domain *domain, iommu_gaddr_t base,
+ iommu_gaddr_t size, bool cansleep)
+{
+ struct iommu_unit *unit;
+ struct iommu_qi_genseq gseq;
+
+ unit = domain->iommu;
+ IOMMU_LOCK(unit);
+ x86_iommu->qi_invalidate_emit(domain, base, size, &gseq, true);
+
+ /*
+ * To avoid a missed wakeup in iommu_qi_task(), the unit's
+ * waiters count must be incremented before the tail is
+ * advanced.
+ */
+ IOMMU2X86C(unit)->inv_seq_waiters++;
+
+ x86_iommu->qi_advance_tail(unit);
+ iommu_qi_wait_for_seq(unit, &gseq, !cansleep);
+ IOMMU_UNLOCK(unit);
+}
+
+void
+iommu_qi_drain_tlb_flush(struct iommu_unit *unit)
+{
+ struct x86_unit_common *x86c;
+ struct iommu_map_entry *entry, *head;
+
+ x86c = IOMMU2X86C(unit);
+ for (head = x86c->tlb_flush_head;; head = entry) {
+ entry = (struct iommu_map_entry *)
+ atomic_load_acq_ptr((uintptr_t *)&head->tlb_flush_next);
+ if (entry == NULL ||
+ !iommu_qi_seq_processed(unit, &entry->gseq))
+ break;
+ x86c->tlb_flush_head = entry;
+ iommu_gas_free_entry(head);
+ if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
+ iommu_gas_free_region(entry);
+ else
+ iommu_gas_free_space(entry);
+ }
+}
+
+void
+iommu_qi_common_init(struct iommu_unit *unit, task_fn_t qi_task)
+{
+ struct x86_unit_common *x86c;
+ u_int qi_sz;
+
+ x86c = IOMMU2X86C(unit);
+
+ x86c->tlb_flush_head = x86c->tlb_flush_tail =
+ iommu_gas_alloc_entry(NULL, 0);
+ TASK_INIT(&x86c->qi_task, 0, qi_task, unit);
+ x86c->qi_taskqueue = taskqueue_create_fast("iommuqf", M_WAITOK,
+ taskqueue_thread_enqueue, &x86c->qi_taskqueue);
+ taskqueue_start_threads(&x86c->qi_taskqueue, 1, PI_AV,
+ "iommu%d qi taskq", unit->unit);
+
+ x86c->inv_waitd_gen = 0;
+ x86c->inv_waitd_seq = 1;
+
+ qi_sz = 3;
+ TUNABLE_INT_FETCH("hw.iommu.qi_size", &qi_sz);
+ if (qi_sz > x86c->qi_buf_maxsz)
+ qi_sz = x86c->qi_buf_maxsz;
+ x86c->inv_queue_size = (1ULL << qi_sz) * PAGE_SIZE;
+ /* Reserve one descriptor to prevent wraparound. */
+ x86c->inv_queue_avail = x86c->inv_queue_size -
+ x86c->qi_cmd_sz;
+
+ /*
+ * The invalidation queue reads by DMARs/AMDIOMMUs are always
+ * coherent.
+ */
+ x86c->inv_queue = kmem_alloc_contig(x86c->inv_queue_size,
+ M_WAITOK | M_ZERO, 0, iommu_high, PAGE_SIZE, 0,
+ VM_MEMATTR_DEFAULT);
+ x86c->inv_waitd_seq_hw_phys = pmap_kextract(
+ (vm_offset_t)&x86c->inv_waitd_seq_hw);
+}
+
+void
+iommu_qi_common_fini(struct iommu_unit *unit, void (*disable_qi)(
+ struct iommu_unit *))
+{
+ struct x86_unit_common *x86c;
+ struct iommu_qi_genseq gseq;
+
+ x86c = IOMMU2X86C(unit);
+
+ taskqueue_drain(x86c->qi_taskqueue, &x86c->qi_task);
+ taskqueue_free(x86c->qi_taskqueue);
+ x86c->qi_taskqueue = NULL;
+
+ IOMMU_LOCK(unit);
+	/* quiesce */
+ x86_iommu->qi_ensure(unit, 1);
+ iommu_qi_emit_wait_seq(unit, &gseq, true);
+ /* See iommu_qi_invalidate_locked(). */
+ x86c->inv_seq_waiters++;
+ x86_iommu->qi_advance_tail(unit);
+ iommu_qi_wait_for_seq(unit, &gseq, false);
+	/* only after the quiesce, disable queue */
+ disable_qi(unit);
+ KASSERT(x86c->inv_seq_waiters == 0,
+ ("iommu%d: waiters on disabled queue", unit->unit));
+ IOMMU_UNLOCK(unit);
+
+ kmem_free(x86c->inv_queue, x86c->inv_queue_size);
+ x86c->inv_queue = NULL;
+ x86c->inv_queue_size = 0;
+}
+
+int
+iommu_alloc_irq(struct iommu_unit *unit, int idx)
+{
+ device_t dev, pcib;
+ struct iommu_msi_data *dmd;
+ uint64_t msi_addr;
+ uint32_t msi_data;
+ int error;
+
+ MPASS(idx >= 0 || idx < IOMMU_MAX_MSI);
+
+ dev = unit->dev;
+ dmd = &IOMMU2X86C(unit)->intrs[idx];
+ pcib = device_get_parent(device_get_parent(dev)); /* Really not pcib */
+ error = PCIB_ALLOC_MSIX(pcib, dev, &dmd->irq);
+ if (error != 0) {
+ device_printf(dev, "cannot allocate %s interrupt, %d\n",
+ dmd->name, error);
+ goto err1;
+ }
+ error = bus_set_resource(dev, SYS_RES_IRQ, dmd->irq_rid,
+ dmd->irq, 1);
+ if (error != 0) {
+ device_printf(dev, "cannot set %s interrupt resource, %d\n",
+ dmd->name, error);
+ goto err2;
+ }
+ dmd->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
+ &dmd->irq_rid, RF_ACTIVE);
+ if (dmd->irq_res == NULL) {
+ device_printf(dev,
+ "cannot allocate resource for %s interrupt\n", dmd->name);
+ error = ENXIO;
+ goto err3;
+ }
+ error = bus_setup_intr(dev, dmd->irq_res, INTR_TYPE_MISC,
+ dmd->handler, NULL, unit, &dmd->intr_handle);
+ if (error != 0) {
+ device_printf(dev, "cannot setup %s interrupt, %d\n",
+ dmd->name, error);
+ goto err4;
+ }
+ bus_describe_intr(dev, dmd->irq_res, dmd->intr_handle, "%s", dmd->name);
+ error = PCIB_MAP_MSI(pcib, dev, dmd->irq, &msi_addr, &msi_data);
+ if (error != 0) {
+ device_printf(dev, "cannot map %s interrupt, %d\n",
+ dmd->name, error);
+ goto err5;
+ }
+
+ dmd->msi_data = msi_data;
+ dmd->msi_addr = msi_addr;
+
+ return (0);
+
+err5:
+ bus_teardown_intr(dev, dmd->irq_res, dmd->intr_handle);
+err4:
+ bus_release_resource(dev, SYS_RES_IRQ, dmd->irq_rid, dmd->irq_res);
+err3:
+ bus_delete_resource(dev, SYS_RES_IRQ, dmd->irq_rid);
+err2:
+ PCIB_RELEASE_MSIX(pcib, dev, dmd->irq);
+ dmd->irq = -1;
+err1:
+ return (error);
+}
+
+void
+iommu_release_intr(struct iommu_unit *unit, int idx)
+{
+ device_t dev;
+ struct iommu_msi_data *dmd;
+
+ MPASS(idx >= 0 || idx < IOMMU_MAX_MSI);
+
+ dmd = &IOMMU2X86C(unit)->intrs[idx];
+ if (dmd->handler == NULL || dmd->irq == -1)
+ return;
+ dev = unit->dev;
+
+ bus_teardown_intr(dev, dmd->irq_res, dmd->intr_handle);
+ bus_release_resource(dev, SYS_RES_IRQ, dmd->irq_rid, dmd->irq_res);
+ bus_delete_resource(dev, SYS_RES_IRQ, dmd->irq_rid);
+ PCIB_RELEASE_MSIX(device_get_parent(device_get_parent(dev)),
+ dev, dmd->irq);
+ dmd->irq = -1;
+}
+
+void
+iommu_device_tag_init(struct iommu_ctx *ctx, device_t dev)
+{
+ bus_addr_t maxaddr;
+
+ maxaddr = MIN(ctx->domain->end, BUS_SPACE_MAXADDR);
+ ctx->tag->common.impl = &bus_dma_iommu_impl;
+ ctx->tag->common.boundary = 0;
+ ctx->tag->common.lowaddr = maxaddr;
+ ctx->tag->common.highaddr = maxaddr;
+ ctx->tag->common.maxsize = maxaddr;
+ ctx->tag->common.nsegments = BUS_SPACE_UNRESTRICTED;
+ ctx->tag->common.maxsegsz = maxaddr;
+ ctx->tag->ctx = ctx;
+ ctx->tag->owner = dev;
+}
+
+void
+iommu_domain_free_entry(struct iommu_map_entry *entry, bool free)
+{
+ if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
+ iommu_gas_free_region(entry);
+ else
+ iommu_gas_free_space(entry);
+ if (free)
+ iommu_gas_free_entry(entry);
+ else
+ entry->flags = 0;
+}
+
+/*
+ * Index of the pte for the guest address base in the page table at
+ * the level lvl.
+ */
+int
+pglvl_pgtbl_pte_off(int pglvl, iommu_gaddr_t base, int lvl)
+{
+
+ base >>= IOMMU_PAGE_SHIFT + (pglvl - lvl - 1) *
+ IOMMU_NPTEPGSHIFT;
+ return (base & IOMMU_PTEMASK);
+}
+
+/*
+ * Returns the page index of the page table page in the page table
+ * object, which maps the given address base at the page table level
+ * lvl.
+ */
+vm_pindex_t
+pglvl_pgtbl_get_pindex(int pglvl, iommu_gaddr_t base, int lvl)
+{
+ vm_pindex_t idx, pidx;
+ int i;
+
+ KASSERT(lvl >= 0 && lvl < pglvl,
+ ("wrong lvl %d %d", pglvl, lvl));
+
+ for (pidx = idx = 0, i = 0; i < lvl; i++, pidx = idx) {
+ idx = pglvl_pgtbl_pte_off(pglvl, base, i) +
+ pidx * IOMMU_NPTEPG + 1;
+ }
+ return (idx);
+}
+
+/*
+ * Calculate the total amount of page table pages needed to map the
+ * whole bus address space on the context with the selected agaw.
+ */
+vm_pindex_t
+pglvl_max_pages(int pglvl)
+{
+ vm_pindex_t res;
+ int i;
+
+ for (res = 0, i = pglvl; i > 0; i--) {
+ res *= IOMMU_NPTEPG;
+ res++;
+ }
+ return (res);
+}
+
+iommu_gaddr_t
+pglvl_page_size(int total_pglvl, int lvl)
+{
+ int rlvl;
+ static const iommu_gaddr_t pg_sz[] = {
+ (iommu_gaddr_t)IOMMU_PAGE_SIZE,
+ (iommu_gaddr_t)IOMMU_PAGE_SIZE << IOMMU_NPTEPGSHIFT,
+ (iommu_gaddr_t)IOMMU_PAGE_SIZE << (2 * IOMMU_NPTEPGSHIFT),
+ (iommu_gaddr_t)IOMMU_PAGE_SIZE << (3 * IOMMU_NPTEPGSHIFT),
+ (iommu_gaddr_t)IOMMU_PAGE_SIZE << (4 * IOMMU_NPTEPGSHIFT),
+ (iommu_gaddr_t)IOMMU_PAGE_SIZE << (5 * IOMMU_NPTEPGSHIFT),
+ (iommu_gaddr_t)IOMMU_PAGE_SIZE << (6 * IOMMU_NPTEPGSHIFT),
+ };
+
+ KASSERT(lvl >= 0 && lvl < total_pglvl,
+ ("total %d lvl %d", total_pglvl, lvl));
+ rlvl = total_pglvl - lvl - 1;
+ KASSERT(rlvl < nitems(pg_sz), ("sizeof pg_sz lvl %d", lvl));
+ return (pg_sz[rlvl]);
+}
diff --git a/sys/x86/iommu/x86_iommu.h b/sys/x86/iommu/x86_iommu.h
index 3789586f1eaf..a1ed5c71c513 100644
--- a/sys/x86/iommu/x86_iommu.h
+++ b/sys/x86/iommu/x86_iommu.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2013-2015 The FreeBSD Foundation
+ * Copyright (c) 2013-2015, 2024 The FreeBSD Foundation
*
* This software was developed by Konstantin Belousov
* under sponsorship from the FreeBSD Foundation.
@@ -55,8 +55,142 @@ void iommu_unmap_pgtbl(struct sf_buf *sf);
extern iommu_haddr_t iommu_high;
extern int iommu_tbl_pagecnt;
+extern int iommu_qi_batch_coalesce;
SYSCTL_DECL(_hw_iommu);
-SYSCTL_DECL(_hw_iommu_dmar);
+
+struct x86_unit_common;
+
+struct x86_iommu {
+ struct x86_unit_common *(*get_x86_common)(struct
+ iommu_unit *iommu);
+ void (*unit_pre_instantiate_ctx)(struct iommu_unit *iommu);
+ void (*qi_ensure)(struct iommu_unit *unit, int descr_count);
+ void (*qi_emit_wait_descr)(struct iommu_unit *unit, uint32_t seq,
+ bool, bool, bool);
+ void (*qi_advance_tail)(struct iommu_unit *unit);
+ void (*qi_invalidate_emit)(struct iommu_domain *idomain,
+ iommu_gaddr_t base, iommu_gaddr_t size, struct iommu_qi_genseq *
+ pseq, bool emit_wait);
+ void (*domain_unload_entry)(struct iommu_map_entry *entry, bool free,
+ bool cansleep);
+ void (*domain_unload)(struct iommu_domain *iodom,
+ struct iommu_map_entries_tailq *entries, bool cansleep);
+ struct iommu_ctx *(*get_ctx)(struct iommu_unit *iommu,
+ device_t dev, uint16_t rid, bool id_mapped, bool rmrr_init);
+ void (*free_ctx_locked)(struct iommu_unit *iommu,
+ struct iommu_ctx *context);
+ void (*free_ctx)(struct iommu_ctx *context);
+ struct iommu_unit *(*find)(device_t dev, bool verbose);
+ int (*alloc_msi_intr)(device_t src, u_int *cookies, u_int count);
+ int (*map_msi_intr)(device_t src, u_int cpu, u_int vector,
+ u_int cookie, uint64_t *addr, uint32_t *data);
+ int (*unmap_msi_intr)(device_t src, u_int cookie);
+ int (*map_ioapic_intr)(u_int ioapic_id, u_int cpu, u_int vector,
+ bool edge, bool activehi, int irq, u_int *cookie, uint32_t *hi,
+ uint32_t *lo);
+ int (*unmap_ioapic_intr)(u_int ioapic_id, u_int *cookie);
+};
+void set_x86_iommu(struct x86_iommu *);
+struct x86_iommu *get_x86_iommu(void);
+
+struct iommu_msi_data {
+ int irq;
+ int irq_rid;
+ struct resource *irq_res;
+ void *intr_handle;
+ int (*handler)(void *);
+ int msi_data_reg;
+ int msi_addr_reg;
+ int msi_uaddr_reg;
+ uint64_t msi_addr;
+ uint32_t msi_data;
+ void (*enable_intr)(struct iommu_unit *);
+ void (*disable_intr)(struct iommu_unit *);
+ const char *name;
+};
+
+#define IOMMU_MAX_MSI 3
+
+struct x86_unit_common {
+ uint32_t qi_buf_maxsz;
+ uint32_t qi_cmd_sz;
+
+ char *inv_queue;
+ vm_size_t inv_queue_size;
+ uint32_t inv_queue_avail;
+ uint32_t inv_queue_tail;
+
+ /*
+ * Hw writes there on completion of wait descriptor
+ * processing. Intel writes 4 bytes, while AMD does the
+ * 8-bytes write. Due to little-endian, and use of 4-byte
+ * sequence numbers, the difference does not matter for us.
+ */
+ volatile uint64_t inv_waitd_seq_hw;
+
+ uint64_t inv_waitd_seq_hw_phys;
+ uint32_t inv_waitd_seq; /* next sequence number to use for wait descr */
+ u_int inv_waitd_gen; /* seq number generation AKA seq overflows */
+ u_int inv_seq_waiters; /* count of waiters for seq */
+ u_int inv_queue_full; /* informational counter */
+
+ /*
+ * Delayed freeing of map entries queue processing:
+ *
+ * tlb_flush_head and tlb_flush_tail are used to implement a FIFO
+ * queue that supports concurrent dequeues and enqueues. However,
+ * there can only be a single dequeuer (accessing tlb_flush_head) and
+ * a single enqueuer (accessing tlb_flush_tail) at a time. Since the
+ * unit's qi_task is the only dequeuer, it can access tlb_flush_head
+ * without any locking. In contrast, there may be multiple enqueuers,
+ * so the enqueuers acquire the iommu unit lock to serialize their
+ * accesses to tlb_flush_tail.
+ *
+ * In this FIFO queue implementation, the key to enabling concurrent
+ * dequeues and enqueues is that the dequeuer never needs to access
+ * tlb_flush_tail and the enqueuer never needs to access
+ * tlb_flush_head. In particular, tlb_flush_head and tlb_flush_tail
+ * are never NULL, so neither a dequeuer nor an enqueuer ever needs to
+ * update both. Instead, tlb_flush_head always points to a "zombie"
+ * struct, which previously held the last dequeued item. Thus, the
+ * zombie's next field actually points to the struct holding the first
+ * item in the queue. When an item is dequeued, the current zombie is
+ * finally freed, and the struct that held the just dequeued item
+ * becomes the new zombie. When the queue is empty, tlb_flush_tail
+ * also points to the zombie.
+ */
+ struct iommu_map_entry *tlb_flush_head;
+ struct iommu_map_entry *tlb_flush_tail;
+ struct task qi_task;
+ struct taskqueue *qi_taskqueue;
+
+ struct iommu_msi_data intrs[IOMMU_MAX_MSI];
+};
+
+void iommu_domain_free_entry(struct iommu_map_entry *entry, bool free);
+
+void iommu_qi_emit_wait_seq(struct iommu_unit *unit, struct iommu_qi_genseq *
+ pseq, bool emit_wait);
+void iommu_qi_wait_for_seq(struct iommu_unit *unit, const struct
+ iommu_qi_genseq *gseq, bool nowait);
+void iommu_qi_drain_tlb_flush(struct iommu_unit *unit);
+void iommu_qi_invalidate_locked(struct iommu_domain *domain,
+ struct iommu_map_entry *entry, bool emit_wait);
+void iommu_qi_invalidate_sync(struct iommu_domain *domain, iommu_gaddr_t base,
+ iommu_gaddr_t size, bool cansleep);
+void iommu_qi_common_init(struct iommu_unit *unit, task_fn_t taskfunc);
+void iommu_qi_common_fini(struct iommu_unit *unit, void (*disable_qi)(
+ struct iommu_unit *));
+
+int iommu_alloc_irq(struct iommu_unit *unit, int idx);
+void iommu_release_intr(struct iommu_unit *unit, int idx);
+
+void iommu_device_tag_init(struct iommu_ctx *ctx, device_t dev);
+
+int pglvl_pgtbl_pte_off(int pglvl, iommu_gaddr_t base, int lvl);
+vm_pindex_t pglvl_pgtbl_get_pindex(int pglvl, iommu_gaddr_t base, int lvl);
+vm_pindex_t pglvl_max_pages(int pglvl);
+iommu_gaddr_t pglvl_page_size(int total_pglvl, int lvl);
#endif
diff --git a/sys/x86/x86/msi.c b/sys/x86/x86/msi.c
index 888635cba3f4..c8e7db9469ed 100644
--- a/sys/x86/x86/msi.c
+++ b/sys/x86/x86/msi.c
@@ -480,6 +480,7 @@ msi_alloc(device_t dev, int count, int maxcount, int *irqs)
if (error != 0) {
for (i = 0; i < count; i++)
apic_free_vector(cpu, vector + i, irqs[i]);
+ mtx_unlock(&msi_lock);
free(mirqs, M_MSI);
return (error);
}
@@ -554,7 +555,9 @@ msi_release(int *irqs, int count)
KASSERT(msi->msi_first == first, ("message not in group"));
KASSERT(msi->msi_dev == first->msi_dev, ("owner mismatch"));
#ifdef IOMMU
+ mtx_unlock(&msi_lock);
iommu_unmap_msi_intr(first->msi_dev, msi->msi_remap_cookie);
+ mtx_lock(&msi_lock);
#endif
msi->msi_first = NULL;
msi->msi_dev = NULL;
diff --git a/sys/xdr/xdr_sizeof.c b/sys/xdr/xdr_sizeof.c
index 6b4ee0352c9e..bcacf918f4fd 100644
--- a/sys/xdr/xdr_sizeof.c
+++ b/sys/xdr/xdr_sizeof.c
@@ -94,10 +94,7 @@ x_inline(XDR *xdrs, u_int len)
/* Free the earlier space and allocate new area */
if (xdrs->x_private)
free(xdrs->x_private, M_RPC);
- if ((xdrs->x_private = (caddr_t) malloc(len, M_RPC, M_WAITOK)) == NULL) {
- xdrs->x_base = 0;
- return (NULL);
- }
+ xdrs->x_private = malloc(len, M_RPC, M_WAITOK);
xdrs->x_base = (caddr_t)(uintptr_t) len;
xdrs->x_handy += len;
return ((int32_t *) xdrs->x_private);
diff --git a/tests/sys/Makefile b/tests/sys/Makefile
index 69797de10fda..da7d1a5070ed 100644
--- a/tests/sys/Makefile
+++ b/tests/sys/Makefile
@@ -6,6 +6,7 @@ TESTS_SUBDIRS+= acl
TESTS_SUBDIRS+= aio
TESTS_SUBDIRS+= ${_audit}
TESTS_SUBDIRS+= auditpipe
+TESTS_SUBDIRS+= cam
TESTS_SUBDIRS+= capsicum
TESTS_SUBDIRS+= ${_cddl}
# XXX: Currently broken in CI
diff --git a/tests/sys/cam/Makefile b/tests/sys/cam/Makefile
new file mode 100644
index 000000000000..4cc36604280a
--- /dev/null
+++ b/tests/sys/cam/Makefile
@@ -0,0 +1,7 @@
+.include
+
+TESTSDIR= ${TESTSBASE}/sys/cam
+
+TESTS_SUBDIRS+= ctl
+
+.include
diff --git a/tests/sys/cam/ctl/Makefile b/tests/sys/cam/ctl/Makefile
new file mode 100644
index 000000000000..1333397af464
--- /dev/null
+++ b/tests/sys/cam/ctl/Makefile
@@ -0,0 +1,14 @@
+PACKAGE= tests
+
+TESTSDIR= ${TESTSBASE}/sys/cam/ctl
+
+${PACKAGE}FILES+= ctl.subr
+
+ATF_TESTS_SH+= prevent
+ATF_TESTS_SH+= read_buffer
+ATF_TESTS_SH+= start_stop_unit
+
+# Must be exclusive because it disables/enables camsim
+TEST_METADATA+= is_exclusive="true"
+
+.include
diff --git a/tests/sys/cam/ctl/ctl.subr b/tests/sys/cam/ctl/ctl.subr
new file mode 100644
index 000000000000..18991e0fa144
--- /dev/null
+++ b/tests/sys/cam/ctl/ctl.subr
@@ -0,0 +1,94 @@
+# vim: filetype=sh
+
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (c) 2024 Axcient
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS DOCUMENTATION IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+load_modules() {
+ if ! kldstat -q -m ctl; then
+ kldload ctl || atf_skip "could not load ctl kernel mod"
+ fi
+ if ! ctladm port -o on -p 0; then
+ atf_skip "could not enable the camsim frontend"
+ fi
+}
+
+find_device() {
+ LUN=$1
+
+ # Rescan camsim
+ # XXX camsim doesn't update when creating a new device. Worse, a
+ # rescan won't look for new devices. So we must disable/re-enable it.
+ # Worse still, enabling it isn't synchronous, so we need a retry loop
+ # https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=281000
+ retries=5
+ ctladm port -o off -p 0 >/dev/null
+ ctladm port -o on -p 0 >/dev/null
+ HEXLUN=`printf %x $LUN`
+ while true; do
+ dev=`camcontrol devlist | awk -v lun=$HEXLUN '/FREEBSD CTL/ && $9==lun {split($10, fields, /[,]/); print fields[1];}' | sed 's:[()]::'`
+ if [ -z "$dev" -o ! -c /dev/$dev ]; then
+ retries=$(( $retries - 1 ))
+ if [ $retries -eq 0 ]; then
+ cat lun-create.txt
+ camcontrol devlist
+ atf_fail "Could not find GEOM device"
+ fi
+ sleep 0.1
+ continue
+ fi
+ break
+ done
+ # Ensure that it's actually ready. camcontrol may report the disk's
+ # ident before it's actually ready to receive commands. Maybe that's
+ # because all of the GEOM providers must probe it?
+ while true; do
+ dd if=/dev/$dev bs=4096 count=1 of=/dev/null >/dev/null 2>/dev/null && break
+ retries=$(( $retries - 1 ))
+ if [ $retries -eq 0 ]; then
+ atf_fail "Device never became ready"
+ fi
+ sleep 0.1
+ done
+}
+
+# Create a CTL LUN
+create_ramdisk() {
+ EXTRA_ARGS=$*
+
+ atf_check -o save:lun-create.txt ctladm create -b ramdisk -s 1048576 $EXTRA_ARGS
+ atf_check egrep -q "LUN created successfully" lun-create.txt
+ LUN=`awk '/LUN ID:/ {print $NF}' lun-create.txt`
+ if [ -z "$LUN" ]; then
+ atf_fail "Could not find LUN id"
+ fi
+ find_device $LUN
+}
+
+cleanup() {
+ if [ -e "lun-create.txt" ]; then
+ lun_id=`awk '/LUN ID:/ {print $NF}' lun-create.txt`
+ ctladm remove -b ramdisk -l $lun_id > /dev/null
+ fi
+}
diff --git a/tests/sys/cam/ctl/prevent.sh b/tests/sys/cam/ctl/prevent.sh
new file mode 100644
index 000000000000..a5a187dad8ff
--- /dev/null
+++ b/tests/sys/cam/ctl/prevent.sh
@@ -0,0 +1,161 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (c) 2024 Axcient
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS DOCUMENTATION IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+. $(atf_get_srcdir)/ctl.subr
+
+# TODO
+# * multiple initiators may block removal
+
+# Not Tested
+# * persistent removal (not implemented in CTL)
+
+atf_test_case allow cleanup
+allow_head()
+{
+ atf_set "descr" "SCSI PREVENT ALLOW MEDIUM REMOVAL will prevent a CD from being ejected"
+ atf_set "require.user" "root"
+ atf_set "require.progs" sg_prevent sg_start
+}
+allow_body()
+{
+ # -t 5 for CD/DVD device type
+ create_ramdisk -t 5
+
+ atf_check sg_prevent --prevent 1 /dev/$dev
+
+ # Now sg_start --eject should fail
+ atf_check -s exit:5 -e match:"Illegal request" sg_start --eject /dev/$dev
+
+ atf_check sg_prevent --allow /dev/$dev
+
+ # Now sg_start --eject should work again
+ atf_check -s exit:0 sg_start --eject /dev/$dev
+}
+allow_cleanup()
+{
+ cleanup
+}
+
+atf_test_case allow_idempotent cleanup
+allow_idempotent_head()
+{
+ atf_set "descr" "SCSI PREVENT ALLOW MEDIUM REMOVAL is idempotent when run from the same initiator"
+ atf_set "require.user" "root"
+ atf_set "require.progs" sg_prevent sg_start
+}
+allow_idempotent_body()
+{
+ # -t 5 for CD/DVD device type
+ create_ramdisk -t 5
+
+ atf_check sg_prevent --allow /dev/$dev
+ atf_check sg_prevent --allow /dev/$dev
+ atf_check sg_prevent --prevent 1 /dev/$dev
+
+ # Even though we ran --allow twice, a single --prevent command should
+ # suffice to prevent ejecting. Multiple ALLOW/PREVENT commands from
+ # the same initiator don't have any additional effect.
+ atf_check -s exit:5 -e match:"Illegal request" sg_start --eject /dev/$dev
+}
+allow_idempotent_cleanup()
+{
+ cleanup
+}
+
+atf_test_case nonremovable cleanup
+nonremovable_head()
+{
+ atf_set "descr" "SCSI PREVENT ALLOW MEDIUM REMOVAL may not be used on non-removable media"
+ atf_set "require.user" "root"
+ atf_set "require.progs" sg_prevent
+}
+nonremovable_body()
+{
+ # Create a HDD, not a CD, device
+ create_ramdisk -t 0
+
+ atf_check -s exit:9 -e match:"Invalid opcode" sg_prevent /dev/$dev
+}
+nonremovable_cleanup()
+{
+ cleanup
+}
+
+atf_test_case prevent cleanup
+prevent_head()
+{
+ atf_set "descr" "SCSI PREVENT ALLOW MEDIUM REMOVAL will prevent a CD from being ejected"
+ atf_set "require.user" "root"
+ atf_set "require.progs" sg_prevent sg_start
+}
+prevent_body()
+{
+ # -t 5 for CD/DVD device type
+ create_ramdisk -t 5
+
+ atf_check sg_prevent --prevent 1 /dev/$dev
+
+ # Now sg_start --eject should fail
+ atf_check -s exit:5 -e match:"Illegal request" sg_start --eject /dev/$dev
+}
+prevent_cleanup()
+{
+ cleanup
+}
+
+atf_test_case prevent_idempotent cleanup
+prevent_idempotent_head()
+{
+ atf_set "descr" "SCSI PREVENT ALLOW MEDIUM REMOVAL is idempotent when run from the same initiator"
+ atf_set "require.user" "root"
+ atf_set "require.progs" sg_prevent sg_start
+}
+prevent_idempotent_body()
+{
+ # -t 5 for CD/DVD device type
+ create_ramdisk -t 5
+
+ atf_check sg_prevent --prevent 1 /dev/$dev
+ atf_check sg_prevent --prevent 1 /dev/$dev
+ atf_check sg_prevent --allow /dev/$dev
+
+	# Even though we ran prevent twice and allow only once, eject
+ # should be allowed. Multiple PREVENT commands from the same initiator
+ # don't have any additional effect.
+ atf_check sg_start --eject /dev/$dev
+}
+prevent_idempotent_cleanup()
+{
+ cleanup
+}
+
+atf_init_test_cases()
+{
+ atf_add_test_case allow
+ atf_add_test_case allow_idempotent
+ atf_add_test_case nonremovable
+ atf_add_test_case prevent
+ atf_add_test_case prevent_idempotent
+}
diff --git a/tests/sys/cam/ctl/read_buffer.sh b/tests/sys/cam/ctl/read_buffer.sh
new file mode 100644
index 000000000000..e54b0dadc134
--- /dev/null
+++ b/tests/sys/cam/ctl/read_buffer.sh
@@ -0,0 +1,172 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (c) 2024 Axcient
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS DOCUMENTATION IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Not tested
+# * modes other than "Data" and "Desc". We don't support those.
+# * Buffer ID other than 0. We don't support those.
+# * The Mode Specific field. We don't support it.
+
+. $(atf_get_srcdir)/ctl.subr
+
+atf_test_case basic cleanup
+basic_head()
+{
+ atf_set "descr" "READ BUFFER can retrieve data previously written by WRITE BUFFER"
+ atf_set "require.user" "root"
+ atf_set "require.progs" sg_read_buffer sg_write_buffer
+}
+basic_body()
+{
+ create_ramdisk
+
+ # Write to its buffer
+ cp /etc/passwd input
+ len=`wc -c input | cut -wf 2`
+ atf_check -o ignore sg_write_buffer --mode data --in=input /dev/$dev
+
+ # Read it back
+ atf_check -o save:output sg_read_buffer --mode data -l $len --raw /dev/$dev
+
+ # And verify
+ if ! diff -q input output; then
+ atf_fail "Miscompare!"
+ fi
+}
+basic_cleanup()
+{
+ cleanup
+}
+
+# Read from the Descriptor mode. Along with Data, these are the only two modes
+# we support.
+atf_test_case desc cleanup
+desc_head()
+{
+ atf_set "descr" "READ BUFFER can retrieve the buffer size via the DESCRIPTOR mode"
+ atf_set "require.user" "root"
+ atf_set "require.progs" sg_read_buffer
+}
+desc_body()
+{
+ create_ramdisk
+
+ atf_check -o inline:" 00 00 04 00 00\n" sg_read_buffer --hex --mode desc /dev/$dev
+}
+desc_cleanup()
+{
+ cleanup
+}
+
+atf_test_case length cleanup
+length_head()
+{
+ atf_set "descr" "READ BUFFER can limit its length with the LENGTH field"
+ atf_set "require.user" "root"
+ atf_set "require.progs" sg_read_buffer sg_write_buffer
+}
+length_body()
+{
+ create_ramdisk
+
+ # Write to its buffer
+ atf_check -o ignore -e ignore dd if=/dev/random of=input bs=4096 count=1
+ atf_check -o ignore -e ignore dd if=input bs=2048 count=1 of=expected
+ atf_check -o ignore sg_write_buffer --mode data --in=input /dev/$dev
+
+ # Read it back
+ atf_check -o save:output sg_read_buffer --mode data -l 2048 --raw /dev/$dev
+
+ # And verify
+ if ! diff -q expected output; then
+ atf_fail "Miscompare!"
+ fi
+}
+length_cleanup()
+{
+ cleanup
+}
+
+atf_test_case offset cleanup
+offset_head()
+{
+ atf_set "descr" "READ BUFFER accepts the BUFFER OFFSET field"
+ atf_set "require.user" "root"
+ atf_set "require.progs" sg_read_buffer sg_write_buffer
+}
+offset_body()
+{
+ create_ramdisk
+
+ # Write to its buffer
+ atf_check -o ignore -e ignore dd if=/dev/random of=input bs=4096 count=1
+ atf_check -o ignore -e ignore dd if=input iseek=2 bs=512 count=1 of=expected
+ atf_check -o ignore sg_write_buffer --mode data --in=input /dev/$dev
+
+ # Read it back
+ atf_check -o save:output sg_read_buffer --mode data -l 512 -o 1024 --raw /dev/$dev
+
+ # And verify
+ if ! diff -q expected output; then
+ atf_fail "Miscompare!"
+ fi
+}
+offset_cleanup()
+{
+ cleanup
+}
+
+atf_test_case uninitialized cleanup
+uninitialized_head()
+{
+ atf_set "descr" "READ BUFFER buffers are zero-initialized"
+ atf_set "require.user" "root"
+ atf_set "require.progs" sg_read_buffer
+}
+uninitialized_body()
+{
+ create_ramdisk
+
+ # Read an uninitialized buffer
+ atf_check -o save:output sg_read_buffer --mode data -l 262144 --raw /dev/$dev
+
+ # And verify
+ atf_check -o ignore -e ignore dd if=/dev/zero bs=262144 count=1 of=expected
+ if ! diff -q expected output; then
+ atf_fail "Miscompare!"
+ fi
+}
+uninitialized_cleanup()
+{
+ cleanup
+}
+
+atf_init_test_cases()
+{
+ atf_add_test_case basic
+ atf_add_test_case desc
+ atf_add_test_case length
+ atf_add_test_case offset
+ atf_add_test_case uninitialized
+}
diff --git a/tests/sys/cam/ctl/start_stop_unit.sh b/tests/sys/cam/ctl/start_stop_unit.sh
new file mode 100644
index 000000000000..163011c8f574
--- /dev/null
+++ b/tests/sys/cam/ctl/start_stop_unit.sh
@@ -0,0 +1,150 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (c) 2024 Axcient
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS DOCUMENTATION IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+. $(atf_get_srcdir)/ctl.subr
+
+# TODO:
+# * format layer
+# * IMM bit
+# * LOEJ
+# * noflush
+# * power conditions
+
+# Not Tested
+# * Power Condition Modifier (not implemented in CTL)
+
+atf_test_case eject cleanup
+eject_head()
+{
+ atf_set "descr" "START STOP UNIT can eject a CDROM device"
+ atf_set "require.user" "root"
+ atf_set "require.progs" sg_start sg_readcap
+}
+eject_body()
+{
+ # -t 5 for CD/DVD device type
+ create_ramdisk -t 5
+
+ # Verify that the device is online
+	# Too bad I don't know of any other way to check that it's online but
+ # by using sg_readcap.
+ atf_check -o ignore -e not-match:"Device not ready" sg_readcap /dev/$dev
+
+ # eject the device
+ atf_check sg_start --eject /dev/$dev
+
+ # Ejected, it should now return ENXIO
+ atf_check -s exit:1 -o ignore -e match:"Device not configured" dd if=/dev/$dev bs=4096 count=1 of=/dev/null
+}
+eject_cleanup()
+{
+ cleanup
+}
+
+atf_test_case load cleanup
+load_head()
+{
+ atf_set "descr" "START STOP UNIT can load a CDROM device"
+ atf_set "require.user" "root"
+ atf_set "require.progs" sg_start sg_readcap
+}
+load_body()
+{
+ # -t 5 for CD/DVD device type
+ create_ramdisk -t 5
+
+ # eject the device
+ atf_check sg_start --eject /dev/$dev
+
+	# Verify that it's offline; it should now return ENXIO
+ atf_check -s exit:1 -o ignore -e match:"Device not configured" dd if=/dev/$dev bs=4096 count=1 of=/dev/null
+
+ # Load it again
+ atf_check sg_start --load /dev/$dev
+
+ atf_check -o ignore -e ignore dd if=/dev/$dev bs=4096 count=1 of=/dev/null
+ atf_check -o ignore -e not-match:"Device not ready" sg_readcap /dev/$dev
+}
+load_cleanup()
+{
+ cleanup
+}
+
+atf_test_case start cleanup
+start_head()
+{
+ atf_set "descr" "START STOP UNIT can start a device"
+ atf_set "require.user" "root"
+ atf_set "require.progs" sg_start sg_readcap
+}
+start_body()
+{
+ create_ramdisk
+
+ # stop the device
+ atf_check sg_start --stop /dev/$dev
+
+ # And start it again
+ atf_check sg_start /dev/$dev
+
+ # Now sg_readcap should succeed. Too bad I don't know of any other way
+ # to check that it's stopped.
+ atf_check -o ignore -e not-match:"Device not ready" sg_readcap /dev/$dev
+}
+start_cleanup()
+{
+ cleanup
+}
+
+atf_test_case stop cleanup
+stop_head()
+{
+ atf_set "descr" "START STOP UNIT can stop a device"
+ atf_set "require.user" "root"
+ atf_set "require.progs" sg_start sg_readcap
+}
+stop_body()
+{
+ create_ramdisk
+
+ # Stop the device
+ atf_check sg_start --stop /dev/$dev
+
+ # Now sg_readcap should fail. Too bad I don't know of any other way to
+ # check that it's stopped.
+ atf_check -s exit:2 -e match:"Device not ready" sg_readcap /dev/$dev
+}
+stop_cleanup()
+{
+ cleanup
+}
+
+atf_init_test_cases()
+{
+ atf_add_test_case eject
+ atf_add_test_case load
+ atf_add_test_case start
+ atf_add_test_case stop
+}
diff --git a/tests/sys/capsicum/Makefile b/tests/sys/capsicum/Makefile
index 6d37cfa08056..fd8dcb29d65c 100644
--- a/tests/sys/capsicum/Makefile
+++ b/tests/sys/capsicum/Makefile
@@ -13,6 +13,10 @@ CFLAGS+= -I${SRCTOP}/tests
GTESTS+= capsicum-test
GTESTS_WRAPPER_SH.capsicum-test= functional
+# This test script runs the same test suite twice, once as root and once as an
+# unprivileged user. Serialize them since some tests access global namespaces,
+# e.g., mqueuefs, and can trample on each other.
+TEST_METADATA.functional+= is_exclusive="true"
SRCS.capsicum-test+= \
capsicum-test-main.cc \
diff --git a/tests/sys/geom/class/gate/ggate_test.sh b/tests/sys/geom/class/gate/ggate_test.sh
index 3ca5c3a2531a..4cbc5d80ae89 100644
--- a/tests/sys/geom/class/gate/ggate_test.sh
+++ b/tests/sys/geom/class/gate/ggate_test.sh
@@ -1,7 +1,5 @@
-
PIDFILE=ggated.pid
PLAINFILES=plainfiles
-PORT=33080
CONF=gg.exports
atf_test_case ggatec_trim cleanup
@@ -17,13 +15,14 @@ ggatec_trim_body()
{
load_ggate
+ port=33080
us=$(alloc_ggate_dev)
work=$(alloc_md)
atf_check -e ignore -o ignore dd if=/dev/random of=/dev/$work bs=1m count=1 conv=notrunc
echo $CONF >> $PLAINFILES
echo "localhost RW /dev/$work" > $CONF
- atf_check ggated -p $PORT -F $PIDFILE $CONF
- atf_check ggatec create -p $PORT -u $us localhost /dev/$work
+ atf_check ggated -p $port -F $PIDFILE $CONF
+ atf_check ggatec create -p $port -u $us localhost /dev/$work
ggate_dev=/dev/ggate${us}
wait_for_ggate_device ${ggate_dev}
@@ -55,6 +54,7 @@ ggated_body()
load_ggate
+ port=33081
us=$(alloc_ggate_dev)
work=$(alloc_md)
src=$(alloc_md)
@@ -67,8 +67,8 @@ ggated_body()
echo $CONF >> $PLAINFILES
echo "127.0.0.1 RW /dev/$work" > $CONF
- atf_check ggated -p $PORT -F $PIDFILE $CONF
- atf_check ggatec create -p $PORT -u $us 127.0.0.1 /dev/$work
+ atf_check ggated -p $port -F $PIDFILE $CONF
+ atf_check ggatec create -p $port -u $us 127.0.0.1 /dev/$work
ggate_dev=/dev/ggate${us}
diff --git a/tests/sys/netpfil/pf/icmp6.sh b/tests/sys/netpfil/pf/icmp6.sh
index b9b60a484afc..eb286e23ef4c 100644
--- a/tests/sys/netpfil/pf/icmp6.sh
+++ b/tests/sys/netpfil/pf/icmp6.sh
@@ -149,8 +149,56 @@ ttl_exceeded_cleanup()
pft_cleanup
}
+atf_test_case "repeat" "cleanup"
+repeat_head()
+{
+ atf_set descr 'Ensure that repeated NDs work'
+ atf_set require.user root
+ atf_set require.progs ndisc6
+}
+
+repeat_body()
+{
+ pft_init
+
+ epair=$(vnet_mkepair)
+ ifconfig ${epair}a inet6 2001:db8::2/64 up no_dad
+
+ vnet_mkjail alcatraz ${epair}b
+ jexec alcatraz ifconfig ${epair}b inet6 2001:db8::1/64 up no_dad
+
+ # Sanity check
+ atf_check -s exit:0 -o ignore \
+ ping -c 1 2001:db8::1
+
+ jexec alcatraz pfctl -e
+ pft_set_rules alcatraz \
+ "block all" \
+ "pass quick inet6 proto ipv6-icmp all icmp6-type neighbrsol keep state (if-bound) ridentifier 1000000107"
+
+ jexec alcatraz pfctl -x loud
+ ndisc6 -m -n -r 1 2001:db8::1 ${epair}a
+ jexec alcatraz pfctl -ss -vv
+
+ atf_check -s exit:0 -o ignore \
+ ndisc6 -m -n -r 1 2001:db8::1 ${epair}a
+ jexec alcatraz pfctl -ss -vv
+ atf_check -s exit:0 -o ignore \
+ ndisc6 -m -n -r 1 2001:db8::1 ${epair}a
+ jexec alcatraz pfctl -ss -vv
+ atf_check -s exit:0 -o ignore \
+ ndisc6 -m -n -r 1 2001:db8::1 ${epair}a
+ jexec alcatraz pfctl -ss -vv
+}
+
+repeat_cleanup()
+{
+ pft_cleanup
+}
+
atf_init_test_cases()
{
atf_add_test_case "zero_id"
atf_add_test_case "ttl_exceeded"
+ atf_add_test_case "repeat"
}
diff --git a/tests/sys/netpfil/pf/nat.sh b/tests/sys/netpfil/pf/nat.sh
index 513abfa5e040..aaa49805c772 100644
--- a/tests/sys/netpfil/pf/nat.sh
+++ b/tests/sys/netpfil/pf/nat.sh
@@ -112,6 +112,139 @@ nested_anchor_body()
}
+atf_test_case "endpoint_independent" "cleanup"
+endpoint_independent_head()
+{
+ atf_set descr 'Test that a client behind NAT gets the same external IP:port for different servers'
+ atf_set require.user root
+}
+
+endpoint_independent_body()
+{
+ pft_init
+ filter="udp and dst port 1234" # only capture udp pings
+
+ epair_client=$(vnet_mkepair)
+ epair_nat=$(vnet_mkepair)
+ epair_server1=$(vnet_mkepair)
+ epair_server2=$(vnet_mkepair)
+ bridge=$(vnet_mkbridge)
+
+ vnet_mkjail nat ${epair_client}b ${epair_nat}a
+ vnet_mkjail client ${epair_client}a
+ vnet_mkjail server1 ${epair_server1}a
+ vnet_mkjail server2 ${epair_server2}a
+
+ ifconfig ${epair_server1}b up
+ ifconfig ${epair_server2}b up
+ ifconfig ${epair_nat}b up
+ ifconfig ${bridge} \
+ addm ${epair_server1}b \
+ addm ${epair_server2}b \
+ addm ${epair_nat}b \
+ up
+
+ jexec nat ifconfig ${epair_client}b 192.0.2.1/24 up
+ jexec nat ifconfig ${epair_nat}a 198.51.100.42/24 up
+ jexec nat sysctl net.inet.ip.forwarding=1
+
+ jexec client ifconfig ${epair_client}a 192.0.2.2/24 up
+ jexec client route add default 192.0.2.1
+
+ jexec server1 ifconfig ${epair_server1}a 198.51.100.32/24 up
+ jexec server2 ifconfig ${epair_server2}a 198.51.100.22/24 up
+
+ # Enable pf!
+ jexec nat pfctl -e
+
+ # validate non-endpoint independent nat rule behaviour
+ pft_set_rules nat \
+ "nat on ${epair_nat}a inet from ! (${epair_nat}a) to any -> (${epair_nat}a)"
+
+ jexec server1 tcpdump -i ${epair_server1}a -w ${PWD}/server1.pcap \
+ --immediate-mode $filter &
+ server1tcppid="$!"
+ jexec server2 tcpdump -i ${epair_server2}a -w ${PWD}/server2.pcap \
+ --immediate-mode $filter &
+ server2tcppid="$!"
+
+ # send out multiple packets
+ for i in $(seq 1 10); do
+ echo "ping" | jexec client nc -u 198.51.100.32 1234 -p 4242 -w 0
+ echo "ping" | jexec client nc -u 198.51.100.22 1234 -p 4242 -w 0
+ done
+
+ kill $server1tcppid
+ kill $server2tcppid
+
+ tuple_server1=$(tcpdump -r ${PWD}/server1.pcap | awk '{addr=$3} END {print addr}')
+ tuple_server2=$(tcpdump -r ${PWD}/server2.pcap | awk '{addr=$3} END {print addr}')
+
+ if [ -z $tuple_server1 ]
+ then
+ atf_fail "server1 did not receive connection from client (default)"
+ fi
+
+ if [ -z $tuple_server2 ]
+ then
+ atf_fail "server2 did not receive connection from client (default)"
+ fi
+
+ if [ "$tuple_server1" = "$tuple_server2" ]
+ then
+ echo "server1 tcpdump: $tuple_server1"
+ echo "server2 tcpdump: $tuple_server2"
+ atf_fail "Received same IP:port on server1 and server2 (default)"
+ fi
+
+ # validate endpoint independent nat rule behaviour
+ pft_set_rules nat \
+ "nat on ${epair_nat}a inet from ! (${epair_nat}a) to any -> (${epair_nat}a) endpoint-independent"
+
+ jexec server1 tcpdump -i ${epair_server1}a -w ${PWD}/server1.pcap \
+ --immediate-mode $filter &
+ server1tcppid="$!"
+ jexec server2 tcpdump -i ${epair_server2}a -w ${PWD}/server2.pcap \
+ --immediate-mode $filter &
+ server2tcppid="$!"
+
+ # send out multiple packets, sometimes one fails to go through
+ for i in $(seq 1 10); do
+ echo "ping" | jexec client nc -u 198.51.100.32 1234 -p 4242 -w 0
+ echo "ping" | jexec client nc -u 198.51.100.22 1234 -p 4242 -w 0
+ done
+
+ kill $server1tcppid
+ kill $server2tcppid
+
+ tuple_server1=$(tcpdump -r ${PWD}/server1.pcap | awk '{addr=$3} END {print addr}')
+ tuple_server2=$(tcpdump -r ${PWD}/server2.pcap | awk '{addr=$3} END {print addr}')
+
+ if [ -z $tuple_server1 ]
+ then
+ atf_fail "server1 did not receive connection from client (endpoint-independent)"
+ fi
+
+ if [ -z $tuple_server2 ]
+ then
+ atf_fail "server2 did not receive connection from client (endpoint-independent)"
+ fi
+
+ if [ ! "$tuple_server1" = "$tuple_server2" ]
+ then
+ echo "server1 tcpdump: $tuple_server1"
+ echo "server2 tcpdump: $tuple_server2"
+ atf_fail "Received different IP:port on server1 than server2 (endpoint-independent)"
+ fi
+}
+
+endpoint_independent_cleanup()
+{
+ pft_cleanup
+ rm -f server1.out
+ rm -f server2.out
+}
+
nested_anchor_cleanup()
{
pft_cleanup
@@ -121,4 +254,5 @@ atf_init_test_cases()
{
atf_add_test_case "exhaust"
atf_add_test_case "nested_anchor"
+ atf_add_test_case "endpoint_independent"
}
diff --git a/tools/build/cross-build/fake_sysctl.c b/tools/build/cross-build/fake_sysctl.c
index 4f1b271f3858..c4e40ebb9e72 100644
--- a/tools/build/cross-build/fake_sysctl.c
+++ b/tools/build/cross-build/fake_sysctl.c
@@ -53,6 +53,7 @@ __freebsd_sysctlbyname(const char *name, void *oldp, size_t *oldlenp,
errx(EX_USAGE, "kern.vm_guest is read-only");
strlcpy(oldp, "none", *oldlenp);
*oldlenp = strlen("none");
+ return (0);
}
errx(EX_USAGE, "fatal: unknown sysctl %s\n", name);
}
diff --git a/usr.bin/beep/beep.c b/usr.bin/beep/beep.c
index 151236b4825b..2696bacfacf4 100644
--- a/usr.bin/beep/beep.c
+++ b/usr.bin/beep/beep.c
@@ -202,7 +202,7 @@ main(int argc, char **argv)
f = open(oss_dev, O_WRONLY);
if (f < 0)
- errx(1, "Failed to open '%s'", oss_dev);
+ err(1, "Failed to open '%s'", oss_dev);
c = 1; /* mono */
if (ioctl(f, SOUND_PCM_WRITE_CHANNELS, &c) != 0)
diff --git a/usr.bin/du/du.c b/usr.bin/du/du.c
index 96ad7c037dfd..185a5cbe4465 100644
--- a/usr.bin/du/du.c
+++ b/usr.bin/du/du.c
@@ -55,6 +55,8 @@
#define UNITS_2 1
#define UNITS_SI 2
+#define DU_XO_VERSION "1"
+
static SLIST_HEAD(ignhead, ignentry) ignores;
struct ignentry {
char *mask;
@@ -259,6 +261,8 @@ main(int argc, char *argv[])
if ((fts = fts_open(argv, ftsoptions, NULL)) == NULL)
err(1, "fts_open");
+
+ xo_set_version(DU_XO_VERSION);
xo_open_container("disk-usage-information");
xo_open_list("paths");
while (errno = 0, (p = fts_read(fts)) != NULL) {
diff --git a/usr.bin/fetch/fetch.c b/usr.bin/fetch/fetch.c
index 5ff84d0fdd14..5e4b14289ca7 100644
--- a/usr.bin/fetch/fetch.c
+++ b/usr.bin/fetch/fetch.c
@@ -582,16 +582,17 @@ fetch(char *URL, const char *path, int *is_http)
if (sigalrm || sigint)
goto signal;
if (f == NULL) {
- warnx("%s: %s", URL, fetchLastErrString);
- if (i_flag && (strcmp(url->scheme, SCHEME_HTTP) == 0 ||
- strcmp(url->scheme, SCHEME_HTTPS) == 0) &&
- fetchLastErrCode == FETCH_OK &&
+ if (i_flag && *is_http && fetchLastErrCode == FETCH_OK &&
strcmp(fetchLastErrString, "Not Modified") == 0) {
/* HTTP Not Modified Response, return OK. */
+ if (v_level > 0)
+ warnx("%s: %s", URL, fetchLastErrString);
r = 0;
goto done;
- } else
+ } else {
+ warnx("%s: %s", URL, fetchLastErrString);
goto failure;
+ }
}
if (sigint)
goto signal;
diff --git a/usr.bin/getaddrinfo/getaddrinfo.1 b/usr.bin/getaddrinfo/getaddrinfo.1
index ff99cd1ea1ee..fa9e8adce6a2 100644
--- a/usr.bin/getaddrinfo/getaddrinfo.1
+++ b/usr.bin/getaddrinfo/getaddrinfo.1
@@ -171,7 +171,8 @@ stream inet tcp 199.233.217.249 80
.Xr nsswitch.conf 5 ,
.Xr protocols 5 ,
.Xr resolv.conf 5 ,
-.Xr services 5
+.Xr services 5 ,
+.Xr ip6addrctl 8
.Sh HISTORY
The
.Nm
diff --git a/usr.bin/grep/grep.c b/usr.bin/grep/grep.c
index 9f960f74dbb6..feaf17d7c1e1 100644
--- a/usr.bin/grep/grep.c
+++ b/usr.bin/grep/grep.c
@@ -112,7 +112,7 @@ int binbehave = BINFILE_BIN; /* -aIU: handling of binary files */
int filebehave = FILE_STDIO;
int devbehave = DEV_READ; /* -D: handling of devices */
int dirbehave = DIR_READ; /* -dRr: handling of directories */
-int linkbehave = LINK_READ; /* -OpS: handling of symlinks */
+int linkbehave = LINK_SKIP; /* -OpS: handling of symlinks */
bool dexclude, dinclude; /* --exclude-dir and --include-dir */
bool fexclude, finclude; /* --exclude and --include */
diff --git a/usr.bin/grep/util.c b/usr.bin/grep/util.c
index 936abc41b3ef..4e1c44b442f2 100644
--- a/usr.bin/grep/util.c
+++ b/usr.bin/grep/util.c
@@ -136,16 +136,16 @@ grep_tree(char **argv)
/* This switch effectively initializes 'fts_flags' */
switch(linkbehave) {
case LINK_EXPLICIT:
- fts_flags = FTS_COMFOLLOW;
+ fts_flags = FTS_COMFOLLOW | FTS_PHYSICAL;
break;
case LINK_SKIP:
fts_flags = FTS_PHYSICAL;
break;
default:
- fts_flags = FTS_LOGICAL;
+ fts_flags = FTS_LOGICAL | FTS_NOSTAT;
}
- fts_flags |= FTS_NOSTAT | FTS_NOCHDIR;
+ fts_flags |= FTS_NOCHDIR;
fts = fts_open((argv[0] == NULL) ?
__DECONST(char * const *, wd) : argv, fts_flags, NULL);
@@ -154,15 +154,13 @@ grep_tree(char **argv)
while (errno = 0, (p = fts_read(fts)) != NULL) {
switch (p->fts_info) {
case FTS_DNR:
- /* FALLTHROUGH */
case FTS_ERR:
+ case FTS_NS:
file_err = true;
if(!sflag)
- warnx("%s: %s", p->fts_path, strerror(p->fts_errno));
+ warnc(p->fts_errno, "%s", p->fts_path);
break;
case FTS_D:
- /* FALLTHROUGH */
- case FTS_DP:
if (dexclude || dinclude)
if (!dir_matching(p->fts_name) ||
!dir_matching(p->fts_path))
@@ -173,6 +171,17 @@ grep_tree(char **argv)
warnx("warning: %s: recursive directory loop",
p->fts_path);
break;
+ case FTS_DP:
+ break;
+ case FTS_SL:
+ /*
+ * Skip symlinks for LINK_EXPLICIT and
+ * LINK_SKIP. Note that due to FTS_COMFOLLOW,
+ * symlinks on the command line are followed
+ * for LINK_EXPLICIT and not reported as
+ * symlinks.
+ */
+ break;
default:
/* Check for file exclusion/inclusion */
ok = true;
diff --git a/usr.bin/iscsictl/iscsictl.c b/usr.bin/iscsictl/iscsictl.c
index d95cb9a1c096..b75ff889a9a6 100644
--- a/usr.bin/iscsictl/iscsictl.c
+++ b/usr.bin/iscsictl/iscsictl.c
@@ -746,6 +746,7 @@ main(int argc, char **argv)
if (argc < 0)
exit(1);
+ xo_set_version(ISCSICTL_XO_VERSION);
xo_open_container("iscsictl");
while ((ch = getopt(argc, argv, "AMRLac:d:e:i:n:p:rt:u:s:vw:")) != -1) {
diff --git a/usr.bin/iscsictl/iscsictl.h b/usr.bin/iscsictl/iscsictl.h
index 2ac17890bb57..3bc69e4877a9 100644
--- a/usr.bin/iscsictl/iscsictl.h
+++ b/usr.bin/iscsictl/iscsictl.h
@@ -38,6 +38,8 @@
#define DEFAULT_CONFIG_PATH "/etc/iscsi.conf"
#define DEFAULT_IQN "iqn.1994-09.org.freebsd:"
+#define ISCSICTL_XO_VERSION "1"
+
#define MAX_NAME_LEN 223
#define AUTH_METHOD_UNSPECIFIED 0
diff --git a/usr.bin/netstat/main.c b/usr.bin/netstat/main.c
index 2ed6eca4626e..6d19851b61fc 100644
--- a/usr.bin/netstat/main.c
+++ b/usr.bin/netstat/main.c
@@ -510,6 +510,7 @@ main(int argc, char *argv[])
#endif
if (iflag && !sflag) {
xo_open_container("statistics");
+ xo_set_version(NETSTAT_XO_VERSION);
intpr(NULL, af);
xo_close_container("statistics");
xo_finish();
@@ -517,6 +518,7 @@ main(int argc, char *argv[])
}
if (rflag) {
xo_open_container("statistics");
+ xo_set_version(NETSTAT_XO_VERSION);
if (sflag) {
if (live) {
kresolve_list(nl);
@@ -530,6 +532,7 @@ main(int argc, char *argv[])
}
if (oflag) {
xo_open_container("statistics");
+ xo_set_version(NETSTAT_XO_VERSION);
nhops_print(fib, af);
xo_close_container("statistics");
xo_finish();
@@ -537,6 +540,7 @@ main(int argc, char *argv[])
}
if (Oflag) {
xo_open_container("statistics");
+ xo_set_version(NETSTAT_XO_VERSION);
nhgrp_print(fib, af);
xo_close_container("statistics");
xo_finish();
@@ -547,6 +551,7 @@ main(int argc, char *argv[])
if (gflag) {
xo_open_container("statistics");
+ xo_set_version(NETSTAT_XO_VERSION);
if (sflag) {
if (af == AF_INET || af == AF_UNSPEC)
mrt_stats();
@@ -569,6 +574,7 @@ main(int argc, char *argv[])
if (tp) {
xo_open_container("statistics");
+ xo_set_version(NETSTAT_XO_VERSION);
printproto(tp, tp->pr_name, &first);
if (!first)
xo_close_list("socket");
@@ -578,6 +584,7 @@ main(int argc, char *argv[])
}
xo_open_container("statistics");
+ xo_set_version(NETSTAT_XO_VERSION);
if (af == AF_INET || af == AF_UNSPEC)
for (tp = protox; tp->pr_name; tp++)
printproto(tp, tp->pr_name, &first);
diff --git a/usr.bin/netstat/netstat.h b/usr.bin/netstat/netstat.h
index c41862d9fbdd..7ebfc5180f44 100644
--- a/usr.bin/netstat/netstat.h
+++ b/usr.bin/netstat/netstat.h
@@ -31,6 +31,8 @@
#include
+#define NETSTAT_XO_VERSION "1"
+
#define satosin(sa) ((struct sockaddr_in *)(sa))
#define satosin6(sa) ((struct sockaddr_in6 *)(sa))
#define sin6tosa(sin6) ((struct sockaddr *)(sin6))
diff --git a/usr.bin/tcopy/tcopy.1 b/usr.bin/tcopy/tcopy.1
index da6a5231a843..3f12a807e41e 100644
--- a/usr.bin/tcopy/tcopy.1
+++ b/usr.bin/tcopy/tcopy.1
@@ -97,6 +97,16 @@ command appeared in
.Sh BUGS
.Bl -item
.It
+Modern tape drives may return a SCSI "Incorrect Length Indicator (ILI)"
+for each read with a different block size than what is on the
+tape, and that slows things down a lot.
+This can be disabled with the
+.Xr mt 1
+command:
+.Bd -literal -offset indent
+$ mt param sili -s 1
+.Ed
+.It
Writing an image of a tape to a file does not preserve much more than
the raw data.
Block size(s) and tape EOF marks are lost which would
diff --git a/usr.bin/vmstat/vmstat.c b/usr.bin/vmstat/vmstat.c
index c781a80daed9..1bde43939207 100644
--- a/usr.bin/vmstat/vmstat.c
+++ b/usr.bin/vmstat/vmstat.c
@@ -66,7 +66,7 @@
#include
#include
-#define VMSTAT_XO_VERSION "1"
+#define VMSTAT_XO_VERSION "2"
static char da[] = "da";
@@ -282,6 +282,7 @@ main(int argc, char *argv[])
argv += optind;
xo_set_version(VMSTAT_XO_VERSION);
+ xo_open_container("vmstat");
if (!hflag)
xo_set_options(NULL, "no-humanize");
if (todo == 0)
@@ -383,6 +384,7 @@ main(int argc, char *argv[])
dointr(interval, reps);
if (todo & VMSTAT)
dovmstat(interval, reps);
+ xo_close_container("vmstat");
xo_finish();
exit(0);
}
diff --git a/usr.bin/w/w.c b/usr.bin/w/w.c
index 8bce6d8427e6..47899d0b38e0 100644
--- a/usr.bin/w/w.c
+++ b/usr.bin/w/w.c
@@ -108,6 +108,8 @@ static struct entry {
#define debugproc(p) *(&((struct kinfo_proc *)p)->ki_udata)
+#define W_XO_VERSION "1"
+
#define W_DISPUSERSIZE 10
#define W_DISPLINESIZE 8
#define W_MAXHOSTSIZE 40
@@ -317,6 +319,7 @@ main(int argc, char *argv[])
if (fromwidth > W_MAXHOSTSIZE)
fromwidth = W_MAXHOSTSIZE;
+ xo_set_version(W_XO_VERSION);
xo_open_container("uptime-information");
if (header || wcmd == 0) {
diff --git a/usr.bin/wc/wc.c b/usr.bin/wc/wc.c
index 4e8a2d8a8b59..7b83412f3c42 100644
--- a/usr.bin/wc/wc.c
+++ b/usr.bin/wc/wc.c
@@ -51,6 +51,8 @@
#include
#include
+#define WC_XO_VERSION "1"
+
static const char *stdin_filename = "stdin";
static fileargs_t *fa;
@@ -132,6 +134,8 @@ main(int argc, char *argv[])
doline = doword = dochar = true;
stderr_handle = xo_create_to_file(stderr, XO_STYLE_TEXT, 0);
+
+ xo_set_version(WC_XO_VERSION);
xo_open_container("wc");
xo_open_list("file");
diff --git a/usr.sbin/adduser/adduser.8 b/usr.sbin/adduser/adduser.8
index ed67e21f9430..8ba623fedd9d 100644
--- a/usr.sbin/adduser/adduser.8
+++ b/usr.sbin/adduser/adduser.8
@@ -1,3 +1,6 @@
+.\"-
+.\" SPDX-License-Identifier: BSD-2-Clause
+.\"
.\" Copyright (c) 1995-1996 Wolfram Schneider . Berlin.
.\" All rights reserved.
.\" Copyright (c) 2002-2004 Michael Telahun Makonnen
@@ -427,7 +430,7 @@ message file for
.Nm
.It Pa /usr/share/skel
skeletal login directory
-.It Pa /var/log/adduser
+.It Pa /var/log/userlog
logfile for
.Nm
.El
diff --git a/usr.sbin/ip6addrctl/ip6addrctl.8 b/usr.sbin/ip6addrctl/ip6addrctl.8
index f50da59aa2bb..50245cef91ea 100644
--- a/usr.sbin/ip6addrctl/ip6addrctl.8
+++ b/usr.sbin/ip6addrctl/ip6addrctl.8
@@ -27,7 +27,7 @@
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.Dd September 25, 2001
+.Dd August 10, 2024
.Dt IP6ADDRCTL 8
.Os
.\"
@@ -106,10 +106,15 @@ comments and are ignored.
.Ex -std
.\"
.Sh SEE ALSO
+.Xr getaddrinfo 1 ,
+.Xr getaddrinfo 3
.Rs
+.%A "Dave Thaler"
.%A "Richard Draves"
-.%T "Default Address Selection for IPv6"
-.%N RFC 3484
+.%A "Arifumi Matsumoto"
+.%A "Tim Chown"
+.%T "Default Address Selection for Internet Protocol Version 6 (IPv6)"
+.%R RFC 6724
.Re
.\"
.Sh HISTORY
diff --git a/usr.sbin/ip6addrctl/ip6addrctl.conf.sample b/usr.sbin/ip6addrctl/ip6addrctl.conf.sample
index edcf77553f14..59c1f3db47e0 100644
--- a/usr.sbin/ip6addrctl/ip6addrctl.conf.sample
+++ b/usr.sbin/ip6addrctl/ip6addrctl.conf.sample
@@ -1,11 +1,15 @@
-# default policy table based on RFC 3484.
+# default policy table based on RFC 6724.
# usage: ip6addrctl install path_to_this_file
#
#
#Format:
#Prefix Precedence Label
-::1/128 50 0
-::/0 40 1
-2002::/16 30 2
-::/96 20 3
-::ffff:0:0/96 10 4
+::1/128 50 0
+::/0 40 1
+::ffff:0:0/96 35 4
+2002::/16 30 2
+2001::/32 5 5
+fc00::/7 3 13
+::/96 1 3
+fec0::/10 1 11
+3ffe::/16 1 12
diff --git a/usr.sbin/lastlogin/lastlogin.c b/usr.sbin/lastlogin/lastlogin.c
index a1dcde3f60bf..3a71693f7576 100644
--- a/usr.sbin/lastlogin/lastlogin.c
+++ b/usr.sbin/lastlogin/lastlogin.c
@@ -48,6 +48,8 @@ __RCSID("$NetBSD: lastlogin.c,v 1.4 1998/02/03 04:45:35 perry Exp $");
#include
+#define LASTLOGIN_XO_VERSION "1"
+
int main(int, char **);
static void output(struct utmpx *);
static void usage(void);
@@ -103,6 +105,7 @@ main(int argc, char *argv[])
argc -= optind;
argv += optind;
+ xo_set_version(LASTLOGIN_XO_VERSION);
xo_open_container("lastlogin-information");
xo_open_list("lastlogin");
diff --git a/usr.sbin/mfiutil/mfi_flash.c b/usr.sbin/mfiutil/mfi_flash.c
index 2fbfc978edac..4d1930af4941 100644
--- a/usr.sbin/mfiutil/mfi_flash.c
+++ b/usr.sbin/mfiutil/mfi_flash.c
@@ -129,9 +129,15 @@ flash_adapter(int ac, char **av)
/* First, ask the firmware to allocate space for the flash file. */
mbox_store_word(mbox, sb.st_size);
- mfi_dcmd_command(fd, MFI_DCMD_FLASH_FW_OPEN, NULL, 0, mbox, 4, &status);
+ if (mfi_dcmd_command(fd, MFI_DCMD_FLASH_FW_OPEN, NULL, 0, mbox, 4,
+ &status) < 0) {
+ error = errno;
+ warn("Failed to allocate flash memory");
+ goto error;
+ }
if (status != MFI_STAT_OK) {
- warnx("Failed to alloc flash memory: %s", mfi_status(status));
+ warnx("Failed to allocate flash memory: %s",
+ mfi_status(status));
error = EIO;
goto error;
}
@@ -148,19 +154,26 @@ flash_adapter(int ac, char **av)
nread = read(flash, buf, FLASH_BUF_SIZE);
if (nread <= 0 || nread % 1024 != 0) {
warnx("Bad read from flash file");
- mfi_dcmd_command(fd, MFI_DCMD_FLASH_FW_CLOSE, NULL, 0,
- NULL, 0, NULL);
+ if (mfi_dcmd_command(fd, MFI_DCMD_FLASH_FW_CLOSE,
+ NULL, 0, NULL, 0, NULL) < 0) {
+ warn("Failed to discard flash memory");
+ }
error = ENXIO;
goto error;
}
mbox_store_word(mbox, offset);
- mfi_dcmd_command(fd, MFI_DCMD_FLASH_FW_DOWNLOAD, buf, nread,
- mbox, 4, &status);
+ if (mfi_dcmd_command(fd, MFI_DCMD_FLASH_FW_DOWNLOAD, buf, nread,
+ mbox, 4, &status) < 0) {
+ error = errno;
+ warn("Failed to download firmware");
+ goto error;
+ }
if (status != MFI_STAT_OK) {
- warnx("Flash download failed: %s", mfi_status(status));
- mfi_dcmd_command(fd, MFI_DCMD_FLASH_FW_CLOSE, NULL, 0,
- NULL, 0, NULL);
+ warnx("Failed to download firmware: %s",
+ mfi_status(status));
+ mfi_dcmd_command(fd, MFI_DCMD_FLASH_FW_CLOSE, NULL,
+ 0, NULL, 0, NULL);
error = ENXIO;
goto error;
}
@@ -171,8 +184,12 @@ flash_adapter(int ac, char **av)
/* Kick off the flash. */
printf("WARNING: Firmware flash in progress, do not reboot machine... ");
fflush(stdout);
- mfi_dcmd_command(fd, MFI_DCMD_FLASH_FW_FLASH, &dummy, sizeof(dummy),
- NULL, 0, &status);
+ if (mfi_dcmd_command(fd, MFI_DCMD_FLASH_FW_FLASH, &dummy, sizeof(dummy),
+ NULL, 0, &status) < 0) {
+ error = errno;
+ printf("failed:\n\t%s\n", strerror(error));
+ goto error;
+ }
if (status != MFI_STAT_OK) {
printf("failed:\n\t%s\n", mfi_status(status));
error = ENXIO;
diff --git a/usr.sbin/mountd/mountd.c b/usr.sbin/mountd/mountd.c
index 22d2fbb2af81..b2c3c0a5e0c6 100644
--- a/usr.sbin/mountd/mountd.c
+++ b/usr.sbin/mountd/mountd.c
@@ -2836,7 +2836,7 @@ do_opt(char **cpp, char **endcpp, struct exportlist *ep, struct grouplist *grp,
{
char *cpoptarg, *cpoptend;
char *cp, *endcp, *cpopt, savedc, savedc2;
- int allflag, usedarg;
+ int allflag, usedarg, fnd_equal;
savedc2 = '\0';
cpopt = *cpp;
@@ -2847,14 +2847,18 @@ do_opt(char **cpp, char **endcpp, struct exportlist *ep, struct grouplist *grp,
while (cpopt && *cpopt) {
allflag = 1;
usedarg = -2;
+ fnd_equal = 0;
if ((cpoptend = strchr(cpopt, ','))) {
*cpoptend++ = '\0';
- if ((cpoptarg = strchr(cpopt, '=')))
+ if ((cpoptarg = strchr(cpopt, '='))) {
*cpoptarg++ = '\0';
+ fnd_equal = 1;
+ }
} else {
- if ((cpoptarg = strchr(cpopt, '=')))
+ if ((cpoptarg = strchr(cpopt, '='))) {
*cpoptarg++ = '\0';
- else {
+ fnd_equal = 1;
+ } else {
*cp = savedc;
nextfield(&cp, &endcp);
**endcpp = '\0';
@@ -2867,6 +2871,10 @@ do_opt(char **cpp, char **endcpp, struct exportlist *ep, struct grouplist *grp,
}
}
if (!strcmp(cpopt, "ro") || !strcmp(cpopt, "o")) {
+ if (fnd_equal == 1) {
+ syslog(LOG_ERR, "= after op: %s", cpopt);
+ return (1);
+ }
*exflagsp |= MNT_EXRDONLY;
} else if (cpoptarg && (!strcmp(cpopt, "maproot") ||
!(allflag = strcmp(cpopt, "mapall")) ||
@@ -2905,15 +2913,31 @@ do_opt(char **cpp, char **endcpp, struct exportlist *ep, struct grouplist *grp,
usedarg++;
opt_flags |= OP_NET;
} else if (!strcmp(cpopt, "alldirs")) {
+ if (fnd_equal == 1) {
+ syslog(LOG_ERR, "= after op: %s", cpopt);
+ return (1);
+ }
opt_flags |= OP_ALLDIRS;
} else if (!strcmp(cpopt, "public")) {
+ if (fnd_equal == 1) {
+ syslog(LOG_ERR, "= after op: %s", cpopt);
+ return (1);
+ }
*exflagsp |= MNT_EXPUBLIC;
} else if (!strcmp(cpopt, "webnfs")) {
+ if (fnd_equal == 1) {
+ syslog(LOG_ERR, "= after op: %s", cpopt);
+ return (1);
+ }
*exflagsp |= (MNT_EXPUBLIC|MNT_EXRDONLY|MNT_EXPORTANON);
opt_flags |= OP_MAPALL;
} else if (cpoptarg && !strcmp(cpopt, "index")) {
ep->ex_indexfile = strdup(cpoptarg);
} else if (!strcmp(cpopt, "quiet")) {
+ if (fnd_equal == 1) {
+ syslog(LOG_ERR, "= after op: %s", cpopt);
+ return (1);
+ }
opt_flags |= OP_QUIET;
} else if (cpoptarg && !strcmp(cpopt, "sec")) {
if (parsesec(cpoptarg, ep))
@@ -2921,10 +2945,22 @@ do_opt(char **cpp, char **endcpp, struct exportlist *ep, struct grouplist *grp,
opt_flags |= OP_SEC;
usedarg++;
} else if (!strcmp(cpopt, "tls")) {
+ if (fnd_equal == 1) {
+ syslog(LOG_ERR, "= after op: %s", cpopt);
+ return (1);
+ }
*exflagsp |= MNT_EXTLS;
} else if (!strcmp(cpopt, "tlscert")) {
+ if (fnd_equal == 1) {
+ syslog(LOG_ERR, "= after op: %s", cpopt);
+ return (1);
+ }
*exflagsp |= (MNT_EXTLS | MNT_EXTLSCERT);
} else if (!strcmp(cpopt, "tlscertuser")) {
+ if (fnd_equal == 1) {
+ syslog(LOG_ERR, "= after op: %s", cpopt);
+ return (1);
+ }
*exflagsp |= (MNT_EXTLS | MNT_EXTLSCERT |
MNT_EXTLSCERTUSER);
} else {
diff --git a/usr.sbin/ndp/ndp.c b/usr.sbin/ndp/ndp.c
index 637aac2823ed..7c7bc60451c8 100644
--- a/usr.sbin/ndp/ndp.c
+++ b/usr.sbin/ndp/ndp.c
@@ -633,7 +633,7 @@ dump_rtsock(struct sockaddr_in6 *addr, int cflag)
if (!opts.tflag && !cflag) {
char xobuf[200];
snprintf(xobuf, sizeof(xobuf),
- "{T:/%%-%d.%ds} {T:/%%-%d.%ds} {T:/%%%d.%ds} {T:/%%-9.9s} {T:%%1s} {T:%%5s}\n",
+ "{T:/%%-%d.%ds} {T:/%%-%d.%ds} {T:/%%%d.%ds} {T:/%%-9.9s} {T:/%%1s} {T:/%%5s}\n",
W_ADDR, W_ADDR, W_LL, W_LL, W_IF, W_IF);
xo_emit(xobuf, "Neighbor", "Linklayer Address", "Netif", "Expire", "S", "Flags");
}
diff --git a/usr.sbin/ndp/ndp_netlink.c b/usr.sbin/ndp/ndp_netlink.c
index e18d64175619..bafa9f2a143d 100644
--- a/usr.sbin/ndp/ndp_netlink.c
+++ b/usr.sbin/ndp/ndp_netlink.c
@@ -341,7 +341,7 @@ print_entries_nl(uint32_t ifindex, struct sockaddr_in6 *addr, bool cflag)
if (!opts.tflag && !cflag) {
char xobuf[200];
snprintf(xobuf, sizeof(xobuf),
- "{T:/%%-%d.%ds} {T:/%%-%d.%ds} {T:/%%%d.%ds} {T:/%%-9.9s} {T:%%1s} {T:%%5s}\n",
+ "{T:/%%-%d.%ds} {T:/%%-%d.%ds} {T:/%%%d.%ds} {T:/%%-9.9s} {T:/%%1s} {T:/%%5s}\n",
W_ADDR, W_ADDR, W_LL, W_LL, W_IF, W_IF);
xo_emit(xobuf, "Neighbor", "Linklayer Address", "Netif", "Expire", "S", "Flags");
}
diff --git a/usr.sbin/nvmfd/controller.c b/usr.sbin/nvmfd/controller.c
index 09baaea74ab4..e9435bce69da 100644
--- a/usr.sbin/nvmfd/controller.c
+++ b/usr.sbin/nvmfd/controller.c
@@ -192,7 +192,7 @@ controller_handle_admin_commands(struct controller *c, handle_command *cb,
*/
if (NVMEV(NVME_CC_REG_EN, c->cc) == 0 &&
cmd->opc != NVME_OPC_FABRICS_COMMANDS) {
- warnx("Unsupported admin opcode %#x whiled disabled\n",
+ warnx("Unsupported admin opcode %#x while disabled\n",
cmd->opc);
nvmf_send_generic_error(nc,
NVME_SC_COMMAND_SEQUENCE_ERROR);