I'm continuing to evolve my work on the linus-next integration branch, and
this seemed like another useful tool.
Verify that the sender of the pull request is listed as a maintainer for
the subsystem the patches are destined for. This gives us two things:
1.
*name)
return -EINVAL;
}
+static void stm32_rproc_request_shutdown(struct rproc *rproc)
+{
+ struct stm32_rproc *ddata = rproc->priv;
+ int err, idx;
+
+ /* Request shutdown of the remote processor */
+ if (rproc->state != RPROC_OFFLINE &&
On 10/23/2024 10:24 AM, Bjorn Andersson wrote:
GLINK operates using pre-allocated buffers, aka intents, where incoming
messages are aggregated before being passed up the stack. In the case
that no suitable intents have been announced by the receiver, the sender
can request an intent to be allocated.
The initial implementation of the response
On Tue, Oct 22, 2024 at 04:17:11AM +, Bjorn Andersson wrote:
The initial implementation of request intent response handling dealt
with two outcomes; granted allocations, and all other cases being
considered -ECANCELLED (likely from "cancelling the operation as the
remote is going down").
But on some channels intent allocation is not supported, i
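To make the outcomes described above concrete, here is a minimal, purely
hypothetical sketch; the function name, parameters and the error code chosen
for the "allocation not supported" case are assumptions for illustration, not
the actual qcom_glink change:

#include <linux/errno.h>

/* Hypothetical sketch only, not the real qcom_glink code: distinguish
 * "intent granted", "intents not supported on this channel" and "remote
 * is going down" instead of folding the last two into one error. */
static int intent_req_outcome(bool granted, bool supported)
{
        if (granted)
                return 0;               /* receiver allocated a matching intent */
        if (!supported)
                return -EOPNOTSUPP;     /* assumed: let the sender fall back */
        return -ECANCELED;              /* remote side is shutting down */
}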
stm32_rproc_request_shutdown(struct rproc *rproc)
+{
+ struct stm32_rproc *ddata = rproc->priv;
+ int err, idx;
+
+ /* Request shutdown of the remote processor */
+ if (rproc->state != RPROC_OFFLINE && rproc->state != RPROC_CRASHED) {
+ idx = stm32_rpr
Hello,
syzbot found the following issue on:
HEAD commit: abf2050f51fd Merge tag 'media/v6.12-1' of git://git.kernel..
git tree: upstream
console output: https://syzkaller.appspot.com/x/log.txt?x=15a0310798
kernel config: https://syzkaller.appspot.com/x/.config?x=2a8c36c5e2b56016
das
32_rproc *ddata = rproc->priv;
> + int err, dummy_data, idx;
> +
> + /* Request shutdown of the remote processor */
> + if (rproc->state != RPROC_OFFLINE && rproc->state != RPROC_CRASHED) {
> + idx = stm32_rproc_mbox_idx(rproc, STM32_MBX_SHUTDOWN);
> +
>> reserves. GFP_NOFS is not passed to virtio_fs_enqueue_req() directly,
>>> GFP_KERNEL and memalloc_nofs_{save|restore} helpers are used instead.
>>>
>>> It may seem OK to pass GFP_NOFS to virtio_fs_enqueue_req() as well when
>>>
GFP_KERNEL and memalloc_nofs_{save|restore} helpers are used instead.
>>
>> It may seem OK to pass GFP_NOFS to virtio_fs_enqueue_req() as well when
>> queuing the request for the first time, but this is not the case. The
>> reason is that fuse_request_queue_background() may cal
t may seem OK to pass GFP_NOFS to virtio_fs_enqueue_req() as well when
> queuing the request for the first time, but this is not the case. The
> reason is that fuse_request_queue_background() may call
> ->queue_request_and_unlock() while holding fc->bg_lock, which is a
> spin-lock
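For reference, a minimal sketch of the pattern referred to above, under the
assumption that it runs in sleepable (kworker) context; memalloc_nofs_save()
and memalloc_nofs_restore() are the real scope helpers, while the wrapper
function itself is illustrative. The first-time enqueue path cannot use this
because it runs under the fc->bg_lock spinlock and therefore stays with
GFP_ATOMIC.

#include <linux/sched/mm.h>
#include <linux/slab.h>

/* Illustrative helper, not the virtiofs patch itself. */
static void *alloc_argbuf_nofs(size_t len)
{
        unsigned int nofs_flags;
        void *buf;

        nofs_flags = memalloc_nofs_save();      /* GFP_KERNEL now behaves like GFP_NOFS */
        buf = kmalloc(len, GFP_KERNEL);         /* may sleep: only valid where sleeping is allowed */
        memalloc_nofs_restore(nofs_flags);

        return buf;
}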
*name)
return -EINVAL;
}
+static void stm32_rproc_request_shutdown(struct rproc *rproc)
+{
+ struct stm32_rproc *ddata = rproc->priv;
+ int err, dummy_data, idx;
+
+ /* Request shutdown of the remote processor */
+ if (rproc->state != RPROC_OFFLINE &&
On Fri, May 17, 2024 at 10:46:07PM GMT, Xuewei Niu wrote:
The new request is called `IOCTL_VM_SOCKETS_GET_LOCAL_CIDS`, and the old
one, `IOCTL_VM_SOCKETS_GET_LOCAL_CID`, is retained.
For a transport that supports multiple devices:
* `IOCTL_VM_SOCKETS_GET_LOCAL_CID` returns "-1";
* `IOCTL_VM_SOCKETS_GET_LOCAL_CIDS` returns a vector of CIDs. Th
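For context, this is roughly how userspace queries the local CID today with
the existing ioctl; it is a hedged sketch that only uses the current UAPI,
since the proposed IOCTL_VM_SOCKETS_GET_LOCAL_CIDS return format is not shown
in full here:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vm_sockets.h>

int main(void)
{
        unsigned int cid;
        int fd = open("/dev/vsock", O_RDONLY);

        if (fd < 0 || ioctl(fd, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid) < 0) {
                perror("vsock");
                return 1;
        }
        printf("local CID: %u\n", cid); /* per the proposal, this would become -1 on multi-device transports */
        close(fd);
        return 0;
}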
ctly,
>> GFP_KERNEL and memalloc_nofs_{save|restore} helpers are used instead.
> Makes sense.
>
> However, I don't understand why the GFP_NOFS behavior is optional. It
> should work when queuing the request for the first time as well, no?
No. fuse_request_queue_background() may ca
Makes sense.
However, I don't understand why the GFP_NOFS behavior is optional. It
should work when queuing the request for the first time as well, no?
Thanks,
Miklos
Hello.
We are the Ubisectech Sirius Team, the vulnerability lab of China ValiantSec.
Recently, our team discovered an issue in Linux kernel 6.7. Attached to the
email is a PoC file for the issue.
Stack dump:
BUG: unable to handle page fault for address: ed110c2fd97f
#PF: supervisor read acce
>out_args);
- req->argbuf = kmalloc(len, GFP_ATOMIC);
+ req->argbuf = kmalloc(len, gfp);
if (!req->argbuf)
return -ENOMEM;
@@ -1183,7 +1188,8 @@ static unsigned int sg_init_fuse_args(struct scatterlist *sg,
/* Add a request to a virtqueue and kic
rs/remoteproc/stm32_rproc.c
> @@ -209,6 +209,54 @@ static int stm32_rproc_mbox_idx(struct rproc *rproc,
> const unsigned char *name)
> return -EINVAL;
> }
>
> +static void stm32_rproc_request_shutdown(struct rproc *rproc)
> +{
> + struct stm32_rproc *ddata = rproc-
spin_lock(&fsvq->lock);
@@ -1332,7 +1337,8 @@ static bool use_scattered_argbuf(struct fuse_req *req)
/* Add a request to a virtqueue and kick the device */
static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
-
syzbot suspects this issue was fixed by commit:
commit ad579864637af46447208254719943179b69d41a
Author: Steven Rostedt (Google)
Date: Tue Jan 2 20:12:49 2024 +
tracefs: Check for dentry->d_inode exists in set_gid()
bisection log: https://syzkaller.appspot.com/x/bisect.txt?x=17659d241
Hello.
We are the Ubisectech Sirius Team, the vulnerability lab of China ValiantSec.
Recently, our team discovered an issue in Linux kernel 6.7.0-g052d534373b7.
Attached to the email is a PoC file for the issue.
Stack dump:
[ 185.664167][ T8332] BUG: unable to handle page fault for address:
On 5 Jan 2024, at 5:53, Hou Tao wrote:
> From: Hou Tao
>
> When invoking virtio_fs_enqueue_req() through kworker, both the
> allocation of the sg array and the bounce buffer still use GFP_ATOMIC.
> Considering the size of both the sg array and the bounce buffer may be
> greater than PAGE_SIZE, us
Hi Vivek,
On 1/6/2024 5:27 AM, Vivek Goyal wrote:
> On Fri, Jan 05, 2024 at 08:57:55PM +, Matthew Wilcox wrote:
>> On Fri, Jan 05, 2024 at 03:41:48PM -0500, Vivek Goyal wrote:
>>> On Fri, Jan 05, 2024 at 08:21:00PM +, Matthew Wilcox wrote:
On Fri, Jan 05, 2024 at 03:17:19PM -0500, Viv
On 1/6/2024 4:21 AM, Matthew Wilcox wrote:
> On Fri, Jan 05, 2024 at 03:17:19PM -0500, Vivek Goyal wrote:
>> On Fri, Jan 05, 2024 at 06:53:05PM +0800, Hou Tao wrote:
>>> From: Hou Tao
>>>
>>> When invoking virtio_fs_enqueue_req() through kworker, both the
>>> allocation of the sg array and the
On Fri, Jan 05, 2024 at 08:57:55PM +, Matthew Wilcox wrote:
> On Fri, Jan 05, 2024 at 03:41:48PM -0500, Vivek Goyal wrote:
> > On Fri, Jan 05, 2024 at 08:21:00PM +, Matthew Wilcox wrote:
> > > On Fri, Jan 05, 2024 at 03:17:19PM -0500, Vivek Goyal wrote:
> > > > On Fri, Jan 05, 2024 at 06:53
On Fri, Jan 05, 2024 at 03:41:48PM -0500, Vivek Goyal wrote:
> On Fri, Jan 05, 2024 at 08:21:00PM +, Matthew Wilcox wrote:
> > On Fri, Jan 05, 2024 at 03:17:19PM -0500, Vivek Goyal wrote:
> > > On Fri, Jan 05, 2024 at 06:53:05PM +0800, Hou Tao wrote:
> > > > From: Hou Tao
> > > >
> > > > When
On Fri, Jan 05, 2024 at 08:21:00PM +, Matthew Wilcox wrote:
> On Fri, Jan 05, 2024 at 03:17:19PM -0500, Vivek Goyal wrote:
> > On Fri, Jan 05, 2024 at 06:53:05PM +0800, Hou Tao wrote:
> > > From: Hou Tao
> > >
> > > When invoking virtio_fs_enqueue_req() through kworker, both the
> > > allocat
On Fri, Jan 05, 2024 at 03:17:19PM -0500, Vivek Goyal wrote:
> On Fri, Jan 05, 2024 at 06:53:05PM +0800, Hou Tao wrote:
> > From: Hou Tao
> >
> > When invoking virtio_fs_enqueue_req() through kworker, both the
> > allocation of the sg array and the bounce buffer still use GFP_ATOMIC.
> > Consider
+
> fuse_len_args(num_out, args->out_args);
>
> - req->argbuf = kmalloc(len, GFP_ATOMIC);
> + req->argbuf = kmalloc(len, gfp);
> if (!req->argbuf)
> return -ENOMEM;
>
> @@ -1119,7 +1120,8 @@
return -ENOMEM;
@@ -1119,7 +1120,8 @@ static unsigned int sg_init_fuse_args(struct scatterlist *sg,
/* Add a request to a virtqueue and kick the device */
static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
-struct fuse_req *req, bool in_flight)
+
On Thu, Jan 04, 2024 at 09:58:05AM +0800, Hou Tao wrote:
> static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
> - struct fuse_req *req, bool in_flight);
> + struct fuse_req *req, bool in_flight,
> + boo
f = kmalloc(len, GFP_ATOMIC);
+ req->argbuf = kmalloc(len, gfp);
if (!req->argbuf)
return -ENOMEM;
@@ -1119,7 +1120,8 @@ static unsigned int sg_init_fuse_args(struct scatterlist *sg,
/* Add a request to a virtqueue and kick the device */
sta
On Wed, 03 Jan 2024 13:41:31 -0800
syzbot wrote:
> Hello,
>
> syzbot found the following issue on:
>
> HEAD commit: 453f5db0619e Merge tag 'trace-v6.7-rc7' of git://git.kerne..
> git tree: upstream
> console+strace: https://syzkaller.appspot.com/x/log.txt?x=10ec3829e8
> kernel conf
Hello,
syzbot found the following issue on:
HEAD commit: 453f5db0619e Merge tag 'trace-v6.7-rc7' of git://git.kerne..
git tree: upstream
console+strace: https://syzkaller.appspot.com/x/log.txt?x=10ec3829e8
kernel config: https://syzkaller.appspot.com/x/.config?x=f8e72bae38c079e4
das
latest net bpf titled BUG:
>>> unable to handle kernel paging request in bpf_probe_read_compat_str
>>>
>>> If you fix this issue, please add the following tag to the commit:
>>> Reported-by: xingwei Lee
>>>
>>> kernel: net 9702817384aa4
On 12/20/23 1:19 AM, Hou Tao wrote:
Hi,
On 12/14/2023 11:40 AM, xingwei lee wrote:
Hello, I found a bug in net/bpf in the latest upstream Linux and
confirmed it in the latest net tree and the latest net bpf tree, titled BUG:
unable to handle kernel paging request in bpf_probe_read_compat_str
If you fix
Hi,
On 12/14/2023 11:40 AM, xingwei lee wrote:
> Hello, I found a bug in net/bpf in the latest upstream Linux and
> confirmed it in the latest net tree and the latest net bpf tree, titled BUG:
> unable to handle kernel paging request in bpf_probe_read_compat_str
>
> If you fix this issue,
Hello, I found a bug in net/bpf in the latest upstream Linux and
confirmed it in the latest net tree and the latest net bpf tree, titled BUG:
unable to handle kernel paging request in bpf_probe_read_compat_str
If you fix this issue, please add the following tag to the commit:
Reported-by: xingwei Lee
t; > > > Hi
> > > > > > >
> > > > > > > On Tue, May 19, 2020 at 6:04 PM Lucas Stach
> > > > > > >
> > > > > > wrote:
> > > > > > > > Am Dienstag, den 19.05.2020, 17:41 +0800 schrieb Shengjiu
; > > > >
> > > > > wrote:
> > > > > > > Am Dienstag, den 19.05.2020, 17:41 +0800 schrieb Shengjiu Wang:
> > > > > > > > There are two requirements that we need to move the
> > > > > > > > request of dma
idelot ; Florian
> Fainelli ; Claudiu Manoil ;
> Alexandre Belloni ;
> unglinuxdri...@microchip.com; linux-...@vger.kernel.org;
> linux-kernel@vger.kernel.org
> Subject: Re: [net-next 1/3] net: dsa: optimize tx timestamp request handling
>
> On Fri, Apr 16, 2021 at 08:36:53PM +0800, Ya
n Fainelli ; Claudiu Manoil
> ; Alexandre Belloni
> ; unglinuxdri...@microchip.com;
> linux-...@vger.kernel.org; linux-kernel@vger.kernel.org
> Subject: Re: [net-next 1/3] net: dsa: optimize tx timestamp request handling
>
> On Fri, Apr 16, 2021 at 08:36:53PM +0800, Yangbo Lu wrote:
> >
Didelot ; Florian Fainelli ;
> Claudiu Manoil ; Alexandre Belloni
> ; unglinuxdri...@microchip.com;
> linux-...@vger.kernel.org; linux-kernel@vger.kernel.org
> Subject: Re: [net-next 1/3] net: dsa: optimize tx timestamp request handling
>
> On Sun, Apr 18, 2021 at 12:18:42PM +0300, Vladimi
From: Pawel Laszczak
The patch fixes the missing removal of a request from ep->pending_list when
the stop endpoint command fails. Even after this command fails, the driver
must remove the request from ep->pending_list.
Without this fix the driver can get stuck in a loop in the
cdnsp_gadget_ep_disable function:
On 21-04-19 09:53:11, Pawel Laszczak wrote:
> From: Pawel Laszczak
>
> Patch fixes lack of removing request from ep->pending_list on failure
> of the stop endpoint command. Driver even after failing this command
> must remove request from ep->pending_list.
> Without this
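A hedged sketch of the rule being described, i.e. unlink the request even
when the stop endpoint command fails; the helper below and its arguments are
hypothetical and deliberately generic, not the actual cdnsp patch:

#include <linux/list.h>
#include <linux/printk.h>

/* Hypothetical sketch: unlink a request from the endpoint's pending list
 * regardless of the stop-endpoint command outcome, so that a later
 * "wait until pending_list is empty" loop cannot spin forever. */
static void ep_unlink_request(struct list_head *req_entry, int stop_cmd_ret)
{
        if (stop_cmd_ret)
                pr_warn("stop endpoint command failed: %d\n", stop_cmd_ret);

        list_del(req_entry);    /* must happen even on failure */
}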
>
>>>>>> If an error is received when issuing a start or update transfer
>>>>>> command, the error handler will stop all active requests (including
>>>>>> the current USB request), and call dwc3_gadget_giveback() to notify
>>>>
g a start or update transfer
>>>>> command, the error handler will stop all active requests (including
>>>>> the current USB request), and call dwc3_gadget_giveback() to notify
>>>>> function drivers of the requests which have been stopped. Avoid
>
Hi Josh,
I've adjusted the actual file entries now. Can you please pull the new firmware
from the following URL?
git://git.chelsio.net/pub/git/linux-firmware.git for-upstream
Thanks,
Raju
The following changes since commit f66adc3cde7ee0607ea9198ca460031d3564fb33:
Merge branch 'main' of
git
Josh, thanks for pointing that out. I'll send the new pull request shortly.
-Raju
-Original Message-
From: Josh Boyer
Sent: Monday, 19 April, 2021 19:28
To: Raju Rangoju
Cc: linux-firmw...@kernel.org; linux-kernel@vger.kernel.org; Ramaraju
Yelavarthy ; Rahul Lakkireddy
Subjec
revert the original change, just use pcie_set_readrq() now instead of
changing the PCIe capability register directly.
Fixes: 2df49d365498 ("r8169: remove fiddling with the PCIe max read request size")
Signed-off-by: Heiner Kallweit
Signed-off-by: David S. Miller
Signed-off-by: S
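For reference, pcie_set_readrq() is the PCI core helper the commit message
points to; a hedged usage sketch (the device pointer and the size are
illustrative, and the return value check is kept minimal):

#include <linux/pci.h>

/* Illustrative: set Max_Read_Request_Size through the PCI core instead of
 * poking the PCIe capability register directly. */
static void set_mrrs_example(struct pci_dev *pdev)
{
        if (pcie_set_readrq(pdev, 4096))        /* bytes; must be a supported power of two */
                dev_warn(&pdev->dev, "failed to set max read request size\n");
}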
From: Heiner Kallweit
[ Upstream commit 2df49d36549808a7357ad9f78b7a8e39516e7809 ]
The attempt to improve performance by changing the PCIe max read request
size was added in the vendor driver more than 10 years back and copied
to r8169 driver. In the vendor driver this has been removed long ago
From: Heiner Kallweit
[ Upstream commit 5e00e16cb98935bcf06f51931876d898c226f65c ]
So far we don't increase the max read request size if we switch to
jumbo mode before bringing up the interface for the first time.
Let's change this.
Signed-off-by: Heiner Kallweit
Signed-off-by: Jaku
> > On Wednesday, 20.05.2020 at 16:20 +0800, Shengjiu Wang wrote:
> > > > > Hi
> > > > >
> > > > > On Tue, May 19, 2020 at 6:04 PM Lucas Stach
> > > > >
> > > > wrote:
> > > > > > On Tuesday, 19.05.2020,
> >
> > > > On Tue, May 19, 2020 at 6:04 PM Lucas Stach
> > > >
> > > wrote:
> > > > > On Tuesday, 19.05.2020 at 17:41 +0800, Shengjiu Wang wrote:
> > > > > > There are two requirements that we need to move the request of
>
On Fri Apr 16 2021, Yangbo Lu wrote:
> Optimization could be done on dsa_skb_tx_timestamp(), and dsa device
> drivers should adapt to it.
>
> - Check SKBTX_HW_TSTAMP request flag at the very beginning, instead of in
> port_txtstamp, so that most skbs not requiring tx timest
On Sun, Apr 18, 2021 at 12:18:42PM +0300, Vladimir Oltean wrote:
>
> How about not passing "clone" back to DSA as an argument by reference,
> but instead require the driver to populate DSA_SKB_CB(skb)->clone if it
> needs to do so?
>
> Also, how about changing the return type to void? Returning t
and pass them to the
> - * switch driver
> - */
> + /* Handle tx timestamp request if has */
"if has" what?
> dsa_skb_tx_timestamp(p, skb);
>
> if (dsa_realloc_skb(skb, dev)) {
> --
> 2.25.1
>
Thanks,
Richard
On Fri, Apr 16, 2021 at 08:36:53PM +0800, Yangbo Lu wrote:
> Optimization could be done on dsa_skb_tx_timestamp(), and dsa device
> drivers should adapt to it.
>
> - Check SKBTX_HW_TSTAMP request flag at the very beginning, instead of in
> port_txtstamp, so that most skbs n
ed priority of an interrupt thread
which can be applied upfront.
There are ~5400 instances of request*irq() in the kernel source and
there is no way to make priority decisions for them which work for every
RT system out there.
The kernel sets a default and the system designer, admin, user has to
t
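As a concrete illustration of the per-system tuning mentioned above, a hedged
userspace sketch that re-prioritizes one threaded irq handler after boot;
which irq/<nr>-<name> thread gets which priority is exactly the decision the
kernel cannot make upfront (the PID lookup and the priority value are
placeholders):

#include <sched.h>
#include <stdio.h>
#include <sys/types.h>

/* Hedged example: pin one threaded-irq handler to SCHED_FIFO priority "prio". */
static int set_irq_thread_prio(pid_t irq_thread_pid, int prio)
{
        struct sched_param sp = { .sched_priority = prio };

        if (sched_setscheduler(irq_thread_pid, SCHED_FIFO, &sp)) {
                perror("sched_setscheduler");
                return -1;
        }
        return 0;
}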
The pull request you sent on Sat, 17 Apr 2021 19:18:16 +0200:
> git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git i2c/for-current
has been merged into torvalds/linux.git:
https://git.kernel.org/torvalds/c/194cf4825638256e9afe1d360831aa5379b3517a
Thank you!
--
Deet-doot-dot, I a
Linus,
here is one more driver bugfix for I2C.
Please pull.
Thanks,
Wolfram
The following changes since commit d434405aaab7d0ebc516b68a8fc4100922d7f5ef:
Linux 5.12-rc7 (2021-04-11 15:16:13 -0700)
are available in the Git repository at:
git://git.kernel.org/pub/scm/linux/kernel/git/w
From: Ohad Sharabi
This refactor is needed due to the dynamic FW load in which requesting
the FW file (and getting its attributes) is not immediately followed by
copying FW file content.
Signed-off-by: Ohad Sharabi
Reviewed-by: Oded Gabbay
Signed-off-by: Oded Gabbay
---
drivers/misc/habanala
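For context, the split being described maps onto the standard kernel firmware
API roughly as below; the firmware name, destination buffer and error
handling are placeholders, not the habanalabs specifics:

#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/string.h>

/* Hedged sketch: request the FW file (and learn its size) first, copy its
 * contents only later, as a separate step. */
static int load_fw_deferred_copy(struct device *dev, void *dst, size_t dst_len)
{
        const struct firmware *fw;
        int rc;

        rc = request_firmware(&fw, "vendor/fw_image.bin", dev);        /* name is a placeholder */
        if (rc)
                return rc;

        if (fw->size > dst_len) {
                release_firmware(fw);
                return -EINVAL;
        }

        memcpy(dst, fw->data, fw->size);        /* the "copy" step, done separately */
        release_firmware(fw);
        return 0;
}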
There is a minor race in setting the fuse out request error
between fuse_abort_conn() and fuse_dev_do_read() as explained
below.
Thread-1                           Thread-2
->fuse_simple_request()            ->shutdown
->__fuse_req
Optimization could be done on dsa_skb_tx_timestamp(), and dsa device
drivers should adapt to it.
- Check SKBTX_HW_TSTAMP request flag at the very beginning, instead of in
port_txtstamp, so that most skbs not requiring tx timestamp just return.
- No longer to identify PTP packets, and limit tx
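The "check at the very beginning" idea boils down to an early return on the
tx_flags bit; a hedged sketch of that shape, not the actual
dsa_skb_tx_timestamp() implementation:

#include <linux/skbuff.h>

/* Illustrative early return: most skbs carry no SKBTX_HW_TSTAMP request and
 * bail out here, before any PTP classification or skb cloning happens. */
static void tx_timestamp_fast_path(struct sk_buff *skb)
{
        if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
                return;

        /* only timestamp-requesting packets would reach port_txtstamp */
}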
On 4/14/21 9:43 PM, Lin Feng wrote:
> Since commit 01e99aeca39796003 'blk-mq: insert passthrough request into
> hctx->dispatch directly', passthrough request should not appear in
> IO-scheduler any more, so blk_rq_is_passthrough checking in addon IO
> schedulers is red
On 4/14/21 9:39 PM, Lin Feng wrote:
> Commit 01e99aeca39796003 ("blk-mq: insert passthrough request into
> hctx->dispatch directly") gives high priority to passthrough requests and
> bypass underlying IO scheduler. But as we allocate tag for such request it
> still run
A will only work for usecase A and
be completely wrong for all others.
> Further, what if the irq handler thread has to run at the expected priority
> at the very beginning? This patch helps.
There is no such thing as the expected priority of an interrupt thread
which can be applied upfront.
There are ~
On 4/16/21 6:57 AM, chensong wrote:
>
>
>> On 2021/4/13 at 4:39 PM, Thomas Gleixner wrote:
>> On Tue, Apr 13 2021 at 14:19, Song Chen wrote:
>>> In general, irq handler thread will be assigned a default priority which
>>> is MAX_RT_PRIO/2, as a result, no one can preempt others.
>>>
>>> Here is the ca
On 2021/4/13 at 4:39 PM, Thomas Gleixner wrote:
On Tue, Apr 13 2021 at 14:19, Song Chen wrote:
In general, irq handler thread will be assigned a default priority which
is MAX_RT_PRIO/2, as a result, no one can preempt others.
Here is the case I found in a real project, an interrupt int_a is
comi
On Thu, Apr 15, 2021 at 11:43:26AM +0800, Lin Feng wrote:
> Since commit 01e99aeca39796003 'blk-mq: insert passthrough request into
> hctx->dispatch directly', passthrough request should not appear in
> IO-scheduler any more, so blk_rq_is_passthrough checking in addon IO
>
On Thu, Apr 15, 2021 at 11:39:20AM +0800, Lin Feng wrote:
> Commit 01e99aeca39796003 ("blk-mq: insert passthrough request into
> hctx->dispatch directly") gives high priority to passthrough requests and
> bypass underlying IO scheduler. But as we allocate tag for such requ
Thinh Nguyen wrote:
> Wesley Cheng wrote:
>>
>>
>> On 4/14/2021 11:26 PM, Felipe Balbi wrote:
>>> Wesley Cheng writes:
>>>
>>>> If an error is received when issuing a start or update transfer
>>>> command, the error handler will s
Wesley Cheng wrote:
>
>
> On 4/14/2021 11:26 PM, Felipe Balbi wrote:
>> Wesley Cheng writes:
>>
>>> If an error is received when issuing a start or update transfer
>>> command, the error handler will stop all active requests (including
>>> the cur
On 4/14/2021 11:26 PM, Felipe Balbi wrote:
> Wesley Cheng writes:
>
>> If an error is received when issuing a start or update transfer
>> command, the error handler will stop all active requests (including
>> the current USB request), and call dwc3_gadget_giveback
Hi,
On Mon, Mar 29, 2021 at 06:50:46PM +0100, Paul Cercueil wrote:
> Avoid requesting a full modeset if the sharpness property is not
> modified, because then we don't actually need it.
>
> Fixes: fc1acf317b01 ("drm/ingenic: Add support for the IPU")
> Cc: # 5.8+
> Signed-off-by: Paul Cercueil
On Thu, Apr 15, 2021 at 2:07 PM Paolo Bonzini wrote:
>
> On 15/04/21 02:59, Lai Jiangshan wrote:
> > The next call to inject_pending_event() will reach here AT FIRST with
> > vcpu->arch.exception.injected==false and vcpu->arch.exception.pending==false
> >
> >> ... if (!vcpu->arch.excepti
Wesley Cheng writes:
> If an error is received when issuing a start or update transfer
> command, the error handler will stop all active requests (including
> the current USB request), and call dwc3_gadget_giveback() to notify
> function drivers of the requests which have been sto
On 15/04/21 02:59, Lai Jiangshan wrote:
The next call to inject_pending_event() will reach here AT FIRST with
vcpu->arch.exception.injected==false and vcpu->arch.exception.pending==false
... if (!vcpu->arch.exception.pending) {
if (vcpu->arch.nmi_injected) {
Since commit 01e99aeca39796003 'blk-mq: insert passthrough request into
hctx->dispatch directly', passthrough request should not appear in
IO-scheduler any more, so blk_rq_is_passthrough checking in addon IO
schedulers is redundant.
(Notes: this patch passes generic IO load test wi
Commit 01e99aeca39796003 ("blk-mq: insert passthrough request into
hctx->dispatch directly") gives high priority to passthrough requests and
bypass underlying IO scheduler. But as we allocate tag for such request it
still runs io-scheduler's callback limit_depth, while we real
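For reference, the helper named above is a real block-layer predicate; a
hedged sketch of the kind of early bail-out being discussed (the surrounding
hook and whether to apply it there is the subject of the thread, so treat
this purely as an illustration):

#include <linux/blk-mq.h>

/* Illustrative only: skip scheduler-specific handling for passthrough
 * (REQ_OP_DRV_IN/OUT) requests, which bypass the IO scheduler anyway. */
static bool skip_sched_accounting(struct request *rq)
{
        return blk_rq_is_passthrough(rq);
}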