[PATCH 4/5] server: Added server side named pipe read and write implementation and use it for message mode pipes.

Jacek Caban jacek at codeweavers.com
Mon Mar 13 07:31:47 CDT 2017


On 28.02.2017 20:46, Sebastian Lackner wrote:
> On 28.02.2017 18:10, Jacek Caban wrote:
>> On 28.02.2017 16:44, Sebastian Lackner wrote:
>>> Well, there are also situations where it would be relatively easy to avoid
>>> the roundtrip. We could return immediately at least when no events, APCs or
>>> completions are involved, right? Since this is the most common situation it
>>> might still be worth the effort.
>> If event is not passed, we still need to signal object itself. Thinking
>> about it some more, I guess we could just signal object it before
>> returning for non-overlapped operations returning immediately. It
>> wouldn't be exactly right, but it can't be used for synchronization
>> reliably in this case anyway.
>>
>> Another thing is that we currently don't know if there is user APC until
>> APC_ASYNC_IO returns, but that can be fixed.
>>
>> Since it's just an optimization, can we proceed with the patch itself
>> first? I'm happy to optimize that, but I find it hard to agree that an
>> optimization should block inclusion of patches targeted at correctness.
> I'm not saying that it should block anything, I will leave the design decision
> up to Alexandre. Nevertheless, I think it certainly makes sense to discuss
> about such aspects in advance. Depending on the solution used it might also
> require refactoring of this patch, which is easier to do before it was accepted.


I drafted an optimization discussed in this thread. It's just a
proof-of-concept. A better implementation will store user APCs on server
side when the async is created and I'd consider moving handling of
returned data and wait handle out of fd-specific ops. I ran it on my
benchmark from last year [1] and it was 2 times slower than what I
called "plain wine+msgmode patchset+optimizations". That's expected, since
with this version we need two server calls (that one needed only one). Going
down to one server round-trip for simple cases is easy with this approach and
should give about equal results.


The point is, named pipe code needs just small changes for such
optimization, no refactoring. I hope that addresses your concerns.


Jacek


[1] https://www.winehq.org/pipermail/wine-devel/2016-October/115071.html

-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://www.winehq.org/pipermail/wine-devel/attachments/20170313/0c9cf2d1/attachment.html>
-------------- next part --------------
commit 19672398dc2dafd6480d9c84ea4231e363372fe1
Author: Jacek Caban <jacek at codeweavers.com>
Date:   Mon Mar 13 11:23:50 2017 +0100

    server: Proof-of-concept immediate return optimization.

diff --git a/dlls/ntdll/file.c b/dlls/ntdll/file.c
index 8738499..e9d75b3 100644
--- a/dlls/ntdll/file.c
+++ b/dlls/ntdll/file.c
@@ -579,6 +579,11 @@ static NTSTATUS server_read_file( HANDLE handle, HANDLE event, PIO_APC_ROUTINE a
         status = wine_server_call( req );
         wait_handle = wine_server_ptr_handle( reply->wait );
         options     = reply->options;
+        if (wait_handle && status != STATUS_PENDING)
+        {
+            io->u.Status    = status;
+            io->Information = wine_server_reply_size( reply );
+        }
     }
     SERVER_END_REQ;
 
@@ -586,9 +591,23 @@ static NTSTATUS server_read_file( HANDLE handle, HANDLE event, PIO_APC_ROUTINE a
 
     if (wait_handle)
     {
-        NtWaitForSingleObject( wait_handle, (options & FILE_SYNCHRONOUS_IO_ALERT), NULL );
-        status = io->u.Status;
-        NtClose( wait_handle );
+        if (status == STATUS_PENDING)
+        {
+            NtWaitForSingleObject( wait_handle, (options & FILE_SYNCHRONOUS_IO_ALERT), NULL );
+            status = io->u.Status;
+            NtClose( wait_handle );
+        }
+        else
+        {
+            SERVER_START_REQ( async_complete )
+            {
+                req->handle  = wine_server_obj_handle( wait_handle );
+                req->apc     = wine_server_client_ptr( apc );
+                req->apc_arg = wine_server_client_ptr( apc_context );
+                status = wine_server_call( req );
+            }
+            SERVER_END_REQ;
+        }
     }
 
     return status;
@@ -626,6 +645,11 @@ static NTSTATUS server_write_file( HANDLE handle, HANDLE event, PIO_APC_ROUTINE
         status = wine_server_call( req );
         wait_handle = wine_server_ptr_handle( reply->wait );
         options     = reply->options;
+        if (wait_handle && status != STATUS_PENDING)
+        {
+            io->u.Status    = status;
+            io->Information = reply->size;
+        }
     }
     SERVER_END_REQ;
 
@@ -633,9 +657,23 @@ static NTSTATUS server_write_file( HANDLE handle, HANDLE event, PIO_APC_ROUTINE
 
     if (wait_handle)
     {
-        NtWaitForSingleObject( wait_handle, (options & FILE_SYNCHRONOUS_IO_ALERT), NULL );
-        status = io->u.Status;
-        NtClose( wait_handle );
+        if (status == STATUS_PENDING)
+        {
+            NtWaitForSingleObject( wait_handle, (options & FILE_SYNCHRONOUS_IO_ALERT), NULL );
+            status = io->u.Status;
+            NtClose( wait_handle );
+        }
+        else
+        {
+            SERVER_START_REQ( async_complete )
+            {
+                req->handle  = wine_server_obj_handle( wait_handle );
+                req->apc     = wine_server_client_ptr( apc );
+                req->apc_arg = wine_server_client_ptr( apc_context );
+                status = wine_server_call( req );
+            }
+            SERVER_END_REQ;
+        }
     }
 
     return status;
diff --git a/server/async.c b/server/async.c
index e113681..57c93cf 100644
--- a/server/async.c
+++ b/server/async.c
@@ -180,19 +180,22 @@ void async_terminate( struct async *async, unsigned int status )
     async->status = status;
     if (async->iosb && async->iosb->status == STATUS_PENDING) async->iosb->status = status;
 
-    if (async->data.callback)
+    if (async->queue)
     {
-        apc_call_t data;
-
-        memset( &data, 0, sizeof(data) );
-        data.type            = APC_ASYNC_IO;
-        data.async_io.func   = async->data.callback;
-        data.async_io.user   = async->data.arg;
-        data.async_io.sb     = async->data.iosb;
-        data.async_io.status = status;
-        thread_queue_apc( async->thread, &async->obj, &data );
+        if (async->data.callback)
+        {
+            apc_call_t data;
+
+            memset( &data, 0, sizeof(data) );
+            data.type            = APC_ASYNC_IO;
+            data.async_io.func   = async->data.callback;
+            data.async_io.user   = async->data.arg;
+            data.async_io.sb     = async->data.iosb;
+            data.async_io.status = status;
+            thread_queue_apc( async->thread, &async->obj, &data );
+        }
+        else async_set_result( &async->obj, STATUS_SUCCESS, 0, 0, 0 );
     }
-    else async_set_result( &async->obj, STATUS_SUCCESS, 0, 0, 0 );
 
     async_reselect( async );
     if (async->queue) release_object( async );  /* so that it gets destroyed when the async is done */
@@ -235,7 +238,7 @@ void queue_async( struct async_queue *queue, struct async *async )
 {
     async->queue = (struct async_queue *)grab_object( queue );
 
-    grab_object( async );
+    if (async->status == STATUS_PENDING) grab_object( async );
     list_add_tail( &queue->queue, &async->queue_entry );
 
     if (queue->fd) set_fd_signaled( queue->fd, 0 );
@@ -528,3 +531,19 @@ DECL_HANDLER(get_async_result)
     reply->size = iosb->result;
     set_error( iosb->status );
 }
+
+DECL_HANDLER(async_complete)
+{
+    struct async *async;
+
+    if (!(async = (struct async *)get_handle_obj( current->process, req->handle, 0, &async_ops ))) return;
+    close_handle( current->process, req->handle );
+
+    if (async->iosb)
+    {
+        if (async->iosb->status != STATUS_PENDING)
+            async_set_result( &async->obj, async->iosb->status, async->iosb->result, req->apc, req->apc_arg );
+        set_error( async->iosb->status );
+    }
+    release_object( async );
+}
diff --git a/server/fd.c b/server/fd.c
index bcb1bad..7336823 100644
--- a/server/fd.c
+++ b/server/fd.c
@@ -2480,6 +2480,7 @@ DECL_HANDLER(write)
         {
             reply->wait    = fd->fd_ops->write( fd, async, req->blocking, req->pos );
             reply->options = fd->options;
+            reply->size    = iosb->result;
             release_object( async );
         }
         release_object( iosb );
diff --git a/server/named_pipe.c b/server/named_pipe.c
index 0408077..a3f31bf 100644
--- a/server/named_pipe.c
+++ b/server/named_pipe.c
@@ -745,11 +745,10 @@ static int ignore_reselect;
 
 static void reselect_write_queue( struct pipe_end *pipe_end );
 
-static void reselect_read_queue( struct pipe_end *pipe_end )
+static void reselect_read_queue( struct pipe_end *pipe_end, int read_done )
 {
     struct async *async;
     struct iosb *iosb;
-    int read_done = 0;
 
     ignore_reselect = 1;
     while (!list_empty( &pipe_end->message_queue) && (async = find_pending_async( pipe_end->read_q )))
@@ -799,13 +798,14 @@ static void reselect_write_queue( struct pipe_end *pipe_end )
     }
 
     ignore_reselect = 0;
-    reselect_read_queue( reader );
+    reselect_read_queue( reader, 0 );
 }
 
 static obj_handle_t pipe_end_read( struct fd *fd, struct async *async, int blocking, file_pos_t pos )
 {
     struct pipe_end *pipe_end = get_fd_user( fd );
     obj_handle_t handle = 0;
+    struct iosb *iosb;
 
     if (!use_server_io( pipe_end )) return no_fd_read( fd, async, blocking, pos );
 
@@ -818,21 +818,35 @@ static obj_handle_t pipe_end_read( struct fd *fd, struct async *async, int block
     if (!pipe_end->read_q && !(pipe_end->read_q = create_async_queue( fd ))) return 0;
     if (!(handle = alloc_handle( current->process, async, SYNCHRONIZE, 0 ))) return 0;
 
+    iosb = async_get_iosb( async );
+
+    if (!list_empty( &pipe_end->message_queue ))
+    {
+        message_queue_read( pipe_end, iosb );
+        async_terminate( async, iosb->status );
+        reselect_read_queue( pipe_end, 1 );
+    }
+    else
+    {
+        reselect_read_queue( pipe_end, 0 );
+    }
     queue_async( pipe_end->read_q, async );
-    reselect_read_queue( pipe_end );
-    set_error( STATUS_PENDING );
 
-    if (!blocking)
+    if (iosb->status != STATUS_PENDING)
     {
-        struct iosb *iosb;
-        iosb = async_get_iosb( async );
-        if (iosb->status == STATUS_PENDING)
+        set_reply_data_ptr( iosb->out_data, iosb->out_size );
+        iosb->out_data = NULL;
+    }
+    else
+    {
+        if (!blocking)
         {
             close_handle( current->process, handle );
             handle = 0;
         }
-        release_object( iosb );
     }
+    set_error( iosb->status );
+    release_object( iosb );
     return handle;
 }
 
@@ -842,6 +856,7 @@ static obj_handle_t pipe_end_write( struct fd *fd, struct async *async, int bloc
     struct pipe_end *read_end = write_end->connection;
     struct pipe_message *message;
     obj_handle_t handle = 0;
+    struct iosb *iosb;
 
     if (!use_server_io( write_end )) return no_fd_write( fd, async, blocking, pos );
 
@@ -864,21 +879,20 @@ static obj_handle_t pipe_end_write( struct fd *fd, struct async *async, int bloc
     message->read_pos = 0;
     list_add_tail( &read_end->message_queue, &message->entry );
 
-    queue_async( write_end->write_q, async );
     reselect_write_queue( write_end );
-    set_error( STATUS_PENDING );
+    queue_async( write_end->write_q, async );
 
-    if (!blocking)
+    iosb = async_get_iosb( async );
+    if (iosb->status == STATUS_PENDING)
     {
-        struct iosb *iosb;
-        iosb = async_get_iosb( async );
-        if (iosb->status == STATUS_PENDING)
+        if(!blocking)
         {
             close_handle( current->process, handle );
             handle = 0;
         }
-        release_object( iosb );
     }
+    set_error( iosb->status );
+    release_object( iosb );
     return handle;
 }
 
@@ -900,7 +914,7 @@ static void pipe_end_reselect_async( struct fd *fd, struct async_queue *queue )
     else if (pipe_end->write_q && pipe_end->write_q == queue)
         reselect_write_queue( pipe_end );
     else if (pipe_end->read_q && pipe_end->read_q == queue)
-        reselect_read_queue( pipe_end );
+        reselect_read_queue( pipe_end, 0 );
 }
 
 static inline int is_overlapped( unsigned int options )
diff --git a/server/protocol.def b/server/protocol.def
index 60865a6..d204fcf 100644
--- a/server/protocol.def
+++ b/server/protocol.def
@@ -2359,6 +2359,13 @@ enum message_type
 @END
 
 
+@REQ(async_complete)
+    obj_handle_t handle;
+    client_ptr_t apc;
+    client_ptr_t apc_arg;
+@END
+
+
 /* Perform a read on a file object */
 @REQ(read)
     int            blocking;      /* whether it's a blocking read */


More information about the wine-devel mailing list