source: vendor/current/source3/libsmb/cli_np_tstream.c@ 740

Last change on this file since 740 was 740, checked in by Silvan Scherrer, 12 years ago

Samba Server: update vendor to 3.6.0

File size: 27.5 KB
Line 
1/*
2 Unix SMB/CIFS implementation.
3
4 Copyright (C) Stefan Metzmacher 2010
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>.
18*/
19
20#include "includes.h"
21#include "system/network.h"
22#include "libsmb/libsmb.h"
23#include "../lib/util/tevent_ntstatus.h"
24#include "../lib/tsocket/tsocket.h"
25#include "../lib/tsocket/tsocket_internal.h"
26#include "cli_np_tstream.h"
27
28static const struct tstream_context_ops tstream_cli_np_ops;
29
30/*
31 * Windows uses 4280 (the max xmit/recv size negotiated on DCERPC).
32 * This fits into the max_xmit negotiated at the SMB layer.
33 *
34 * On the sending side they may use SMBtranss if the request does not
35 * fit into a single SMBtrans call.
36 *
37 * Windows uses 1024 as max data size of a SMBtrans request and then
38 * possibly reads the rest of the DCERPC fragment (up to 3256 bytes)
39 * via a SMBreadX.
40 *
41 * For now we just ask for the full 4280 bytes (max data size) in the SMBtrans
42 * request to get the whole fragment at once (like samba 3.5.x and below did).
43 *
44 * It is important that we do SMBwriteX with the size of a full fragment,
45 * otherwise we may get NT_STATUS_PIPE_BUSY on the SMBtrans request
46 * from NT4 servers. (See bug #8195)
47 */
48#define TSTREAM_CLI_NP_MAX_BUF_SIZE 4280
49
/*
 * Per-stream state for a tstream backend running on top of an SMB
 * named pipe opened via a cli_state connection.
 */
struct tstream_cli_np {
	struct cli_state *cli;		/* underlying SMB connection */
	const char *npipe;		/* pipe name, used in error messages */
	uint16_t fnum;			/* open file handle on the pipe */
	unsigned int default_timeout;	/* cli timeout captured at open time */

	/*
	 * State for collapsing one writev and one readv into a single
	 * SMBtrans round trip (armed via tstream_cli_np_use_trans()).
	 */
	struct {
		bool active;			/* a trans cycle was requested */
		struct tevent_req *read_req;	/* readv parked on the trans */
		struct tevent_req *write_req;	/* writev parked on the trans */
		uint16_t setup[2];		/* SMBtrans setup words:
						 * TRANSACT_DCERPCCMD, fnum */
	} trans;

	/* Buffered, partially consumed read and write payload. */
	struct {
		off_t ofs;	/* offset of the next unconsumed byte in buf */
		size_t left;	/* bytes still pending in buf */
		uint8_t *buf;	/* talloc buffer (child of this struct) */
	} read, write;
};
69
70static int tstream_cli_np_destructor(struct tstream_cli_np *cli_nps)
71{
72 NTSTATUS status;
73
74 if (!cli_state_is_connected(cli_nps->cli)) {
75 return 0;
76 }
77
78 /*
79 * TODO: do not use a sync call with a destructor!!!
80 *
81 * This only happens, if a caller does talloc_free(),
82 * while the everything was still ok.
83 *
84 * If we get an unexpected failure within a normal
85 * operation, we already do an async cli_close_send()/_recv().
86 *
87 * Once we've fixed all callers to call
88 * tstream_disconnect_send()/_recv(), this will
89 * never be called.
90 */
91 status = cli_close(cli_nps->cli, cli_nps->fnum);
92 if (!NT_STATUS_IS_OK(status)) {
93 DEBUG(1, ("tstream_cli_np_destructor: cli_close "
94 "failed on pipe %s. Error was %s\n",
95 cli_nps->npipe, nt_errstr(status)));
96 }
97 /*
98 * We can't do much on failure
99 */
100 return 0;
101}
102
/*
 * Async state for tstream_cli_np_open_send()/_recv().
 */
struct tstream_cli_np_open_state {
	struct cli_state *cli;	/* connection the pipe is opened on */
	uint16_t fnum;		/* handle returned by cli_ntcreate_recv() */
	const char *npipe;	/* copy of the pipe name (moved to the stream) */
};
108
109static void tstream_cli_np_open_done(struct tevent_req *subreq);
110
111struct tevent_req *tstream_cli_np_open_send(TALLOC_CTX *mem_ctx,
112 struct tevent_context *ev,
113 struct cli_state *cli,
114 const char *npipe)
115{
116 struct tevent_req *req;
117 struct tstream_cli_np_open_state *state;
118 struct tevent_req *subreq;
119
120 req = tevent_req_create(mem_ctx, &state,
121 struct tstream_cli_np_open_state);
122 if (!req) {
123 return NULL;
124 }
125 state->cli = cli;
126
127 state->npipe = talloc_strdup(state, npipe);
128 if (tevent_req_nomem(state->npipe, req)) {
129 return tevent_req_post(req, ev);
130 }
131
132 subreq = cli_ntcreate_send(state, ev, cli,
133 npipe,
134 0,
135 DESIRED_ACCESS_PIPE,
136 0,
137 FILE_SHARE_READ|FILE_SHARE_WRITE,
138 FILE_OPEN,
139 0,
140 0);
141 if (tevent_req_nomem(subreq, req)) {
142 return tevent_req_post(req, ev);
143 }
144 tevent_req_set_callback(subreq, tstream_cli_np_open_done, req);
145
146 return req;
147}
148
149static void tstream_cli_np_open_done(struct tevent_req *subreq)
150{
151 struct tevent_req *req =
152 tevent_req_callback_data(subreq, struct tevent_req);
153 struct tstream_cli_np_open_state *state =
154 tevent_req_data(req, struct tstream_cli_np_open_state);
155 NTSTATUS status;
156
157 status = cli_ntcreate_recv(subreq, &state->fnum);
158 TALLOC_FREE(subreq);
159 if (!NT_STATUS_IS_OK(status)) {
160 tevent_req_nterror(req, status);
161 return;
162 }
163
164 tevent_req_done(req);
165}
166
/*
 * Receive the result of tstream_cli_np_open_send() and wrap the opened
 * pipe handle in a new tstream context owned by @mem_ctx.
 *
 * On success *_stream receives the new stream and NT_STATUS_OK is
 * returned; on failure the open error is returned.  In both cases the
 * request is finished with tevent_req_received().
 */
NTSTATUS _tstream_cli_np_open_recv(struct tevent_req *req,
				   TALLOC_CTX *mem_ctx,
				   struct tstream_context **_stream,
				   const char *location)
{
	struct tstream_cli_np_open_state *state =
		tevent_req_data(req, struct tstream_cli_np_open_state);
	struct tstream_context *stream;
	struct tstream_cli_np *cli_nps;
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	stream = tstream_context_create(mem_ctx,
					&tstream_cli_np_ops,
					&cli_nps,
					struct tstream_cli_np,
					location);
	if (!stream) {
		tevent_req_received(req);
		return NT_STATUS_NO_MEMORY;
	}
	ZERO_STRUCTP(cli_nps);

	cli_nps->cli = state->cli;
	/* take ownership of the pipe name from the request state */
	cli_nps->npipe = talloc_move(cli_nps, &state->npipe);
	cli_nps->fnum = state->fnum;
	cli_nps->default_timeout = state->cli->timeout;

	/* close the handle when the stream is talloc_free()'d */
	talloc_set_destructor(cli_nps, tstream_cli_np_destructor);

	cli_nps->trans.active = false;
	cli_nps->trans.read_req = NULL;
	cli_nps->trans.write_req = NULL;
	/* pre-build the SMBtrans setup words used by the trans path */
	SSVAL(cli_nps->trans.setup+0, 0, TRANSACT_DCERPCCMD);
	SSVAL(cli_nps->trans.setup+1, 0, cli_nps->fnum);

	*_stream = stream;
	tevent_req_received(req);
	return NT_STATUS_OK;
}
211
212static ssize_t tstream_cli_np_pending_bytes(struct tstream_context *stream)
213{
214 struct tstream_cli_np *cli_nps = tstream_context_data(stream,
215 struct tstream_cli_np);
216
217 if (!cli_state_is_connected(cli_nps->cli)) {
218 errno = ENOTCONN;
219 return -1;
220 }
221
222 return cli_nps->read.left;
223}
224
225bool tstream_is_cli_np(struct tstream_context *stream)
226{
227 struct tstream_cli_np *cli_nps =
228 talloc_get_type(_tstream_context_data(stream),
229 struct tstream_cli_np);
230
231 if (!cli_nps) {
232 return false;
233 }
234
235 return true;
236}
237
238NTSTATUS tstream_cli_np_use_trans(struct tstream_context *stream)
239{
240 struct tstream_cli_np *cli_nps = tstream_context_data(stream,
241 struct tstream_cli_np);
242
243 if (cli_nps->trans.read_req) {
244 return NT_STATUS_PIPE_BUSY;
245 }
246
247 if (cli_nps->trans.write_req) {
248 return NT_STATUS_PIPE_BUSY;
249 }
250
251 if (cli_nps->trans.active) {
252 return NT_STATUS_PIPE_BUSY;
253 }
254
255 cli_nps->trans.active = true;
256
257 return NT_STATUS_OK;
258}
259
260unsigned int tstream_cli_np_set_timeout(struct tstream_context *stream,
261 unsigned int timeout)
262{
263 struct tstream_cli_np *cli_nps = tstream_context_data(stream,
264 struct tstream_cli_np);
265
266 if (!cli_state_is_connected(cli_nps->cli)) {
267 return cli_nps->default_timeout;
268 }
269
270 return cli_set_timeout(cli_nps->cli, timeout);
271}
272
273struct cli_state *tstream_cli_np_get_cli_state(struct tstream_context *stream)
274{
275 struct tstream_cli_np *cli_nps = tstream_context_data(stream,
276 struct tstream_cli_np);
277
278 return cli_nps->cli;
279}
280
/*
 * Async state for the writev operation.
 */
struct tstream_cli_np_writev_state {
	struct tstream_context *stream;	/* owning stream */
	struct tevent_context *ev;	/* event context for subrequests */

	struct iovec *vector;		/* private, mutable copy of the iovecs */
	size_t count;			/* remaining iovec entries */

	int ret;			/* bytes consumed so far (result) */

	/* error captured before the disconnect-on-error close */
	struct {
		int val;
		const char *location;
	} error;
};
295
296static int tstream_cli_np_writev_state_destructor(struct tstream_cli_np_writev_state *state)
297{
298 struct tstream_cli_np *cli_nps =
299 tstream_context_data(state->stream,
300 struct tstream_cli_np);
301
302 cli_nps->trans.write_req = NULL;
303
304 return 0;
305}
306
307static void tstream_cli_np_writev_write_next(struct tevent_req *req);
308
309static struct tevent_req *tstream_cli_np_writev_send(TALLOC_CTX *mem_ctx,
310 struct tevent_context *ev,
311 struct tstream_context *stream,
312 const struct iovec *vector,
313 size_t count)
314{
315 struct tevent_req *req;
316 struct tstream_cli_np_writev_state *state;
317 struct tstream_cli_np *cli_nps = tstream_context_data(stream,
318 struct tstream_cli_np);
319
320 req = tevent_req_create(mem_ctx, &state,
321 struct tstream_cli_np_writev_state);
322 if (!req) {
323 return NULL;
324 }
325 state->stream = stream;
326 state->ev = ev;
327 state->ret = 0;
328
329 talloc_set_destructor(state, tstream_cli_np_writev_state_destructor);
330
331 if (!cli_state_is_connected(cli_nps->cli)) {
332 tevent_req_error(req, ENOTCONN);
333 return tevent_req_post(req, ev);
334 }
335
336 /*
337 * we make a copy of the vector so we can change the structure
338 */
339 state->vector = talloc_array(state, struct iovec, count);
340 if (tevent_req_nomem(state->vector, req)) {
341 return tevent_req_post(req, ev);
342 }
343 memcpy(state->vector, vector, sizeof(struct iovec) * count);
344 state->count = count;
345
346 tstream_cli_np_writev_write_next(req);
347 if (!tevent_req_is_in_progress(req)) {
348 return tevent_req_post(req, ev);
349 }
350
351 return req;
352}
353
static void tstream_cli_np_readv_trans_start(struct tevent_req *req);
static void tstream_cli_np_writev_write_done(struct tevent_req *subreq);

/*
 * Fill the stream's write buffer with up to TSTREAM_CLI_NP_MAX_BUF_SIZE
 * bytes from the remaining iovecs and send it as one SMBwriteX.
 *
 * If a trans cycle is armed (or a readv is already parked on one) and
 * the whole vector fits into this chunk, the write is parked instead so
 * it can be combined with the readv into a single SMBtrans.
 */
static void tstream_cli_np_writev_write_next(struct tevent_req *req)
{
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
		struct tstream_cli_np);
	struct tevent_req *subreq;
	size_t i;
	size_t left = 0;

	for (i=0; i < state->count; i++) {
		left += state->vector[i].iov_len;
	}

	if (left == 0) {
		/* everything consumed: we are done */
		TALLOC_FREE(cli_nps->write.buf);
		tevent_req_done(req);
		return;
	}

	cli_nps->write.ofs = 0;
	cli_nps->write.left = MIN(left, TSTREAM_CLI_NP_MAX_BUF_SIZE);
	cli_nps->write.buf = talloc_realloc(cli_nps, cli_nps->write.buf,
					    uint8_t, cli_nps->write.left);
	if (tevent_req_nomem(cli_nps->write.buf, req)) {
		return;
	}

	/*
	 * copy the pending buffer first
	 */
	while (cli_nps->write.left > 0 && state->count > 0) {
		uint8_t *base = (uint8_t *)state->vector[0].iov_base;
		size_t len = MIN(cli_nps->write.left, state->vector[0].iov_len);

		memcpy(cli_nps->write.buf + cli_nps->write.ofs, base, len);

		base += len;
		state->vector[0].iov_base = base;
		state->vector[0].iov_len -= len;

		cli_nps->write.ofs += len;
		cli_nps->write.left -= len;

		/* advance to the next iovec once this one is drained */
		if (state->vector[0].iov_len == 0) {
			state->vector += 1;
			state->count -= 1;
		}

		state->ret += len;
	}

	if (cli_nps->trans.active && state->count == 0) {
		/* trans armed and the full payload fits: park the write */
		cli_nps->trans.active = false;
		cli_nps->trans.write_req = req;
		return;
	}

	if (cli_nps->trans.read_req && state->count == 0) {
		/* a readv is already waiting: kick off the SMBtrans now */
		cli_nps->trans.write_req = req;
		tstream_cli_np_readv_trans_start(cli_nps->trans.read_req);
		return;
	}

	subreq = cli_write_andx_send(state, state->ev, cli_nps->cli,
				     cli_nps->fnum,
				     8, /* 8 means message mode. */
				     cli_nps->write.buf, 0,
				     cli_nps->write.ofs);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_writev_write_done,
				req);
}
435
436static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
437 int error,
438 const char *location);
439
440static void tstream_cli_np_writev_write_done(struct tevent_req *subreq)
441{
442 struct tevent_req *req =
443 tevent_req_callback_data(subreq, struct tevent_req);
444 struct tstream_cli_np_writev_state *state =
445 tevent_req_data(req, struct tstream_cli_np_writev_state);
446 struct tstream_cli_np *cli_nps =
447 tstream_context_data(state->stream,
448 struct tstream_cli_np);
449 size_t written;
450 NTSTATUS status;
451
452 status = cli_write_andx_recv(subreq, &written);
453 TALLOC_FREE(subreq);
454 if (!NT_STATUS_IS_OK(status)) {
455 tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
456 return;
457 }
458
459 if (written != cli_nps->write.ofs) {
460 tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
461 return;
462 }
463
464 tstream_cli_np_writev_write_next(req);
465}
466
467static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq);
468
469static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
470 int error,
471 const char *location)
472{
473 struct tstream_cli_np_writev_state *state =
474 tevent_req_data(req,
475 struct tstream_cli_np_writev_state);
476 struct tstream_cli_np *cli_nps =
477 tstream_context_data(state->stream,
478 struct tstream_cli_np);
479 struct tevent_req *subreq;
480
481 state->error.val = error;
482 state->error.location = location;
483
484 if (!cli_state_is_connected(cli_nps->cli)) {
485 /* return the original error */
486 _tevent_req_error(req, state->error.val, state->error.location);
487 return;
488 }
489
490 subreq = cli_close_send(state, state->ev, cli_nps->cli, cli_nps->fnum);
491 if (subreq == NULL) {
492 /* return the original error */
493 _tevent_req_error(req, state->error.val, state->error.location);
494 return;
495 }
496 tevent_req_set_callback(subreq,
497 tstream_cli_np_writev_disconnect_done,
498 req);
499}
500
501static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq)
502{
503 struct tevent_req *req =
504 tevent_req_callback_data(subreq, struct tevent_req);
505 struct tstream_cli_np_writev_state *state =
506 tevent_req_data(req, struct tstream_cli_np_writev_state);
507 struct tstream_cli_np *cli_nps =
508 tstream_context_data(state->stream, struct tstream_cli_np);
509 NTSTATUS status;
510
511 status = cli_close_recv(subreq);
512 TALLOC_FREE(subreq);
513
514 cli_nps->cli = NULL;
515
516 /* return the original error */
517 _tevent_req_error(req, state->error.val, state->error.location);
518}
519
520static int tstream_cli_np_writev_recv(struct tevent_req *req,
521 int *perrno)
522{
523 struct tstream_cli_np_writev_state *state =
524 tevent_req_data(req,
525 struct tstream_cli_np_writev_state);
526 int ret;
527
528 ret = tsocket_simple_int_recv(req, perrno);
529 if (ret == 0) {
530 ret = state->ret;
531 }
532
533 tevent_req_received(req);
534 return ret;
535}
536
/*
 * Async state for the readv operation.
 */
struct tstream_cli_np_readv_state {
	struct tstream_context *stream;	/* owning stream */
	struct tevent_context *ev;	/* event context for subrequests */

	struct iovec *vector;		/* private, mutable copy of the iovecs */
	size_t count;			/* remaining iovec entries */

	int ret;			/* bytes delivered so far (result) */

	/* immediate used to defer the readv continuation/failure when a
	 * parked writev has to be completed in the same code path */
	struct {
		struct tevent_immediate *im;
	} trans;

	/* error captured before the disconnect-on-error close */
	struct {
		int val;
		const char *location;
	} error;
};
555
556static int tstream_cli_np_readv_state_destructor(struct tstream_cli_np_readv_state *state)
557{
558 struct tstream_cli_np *cli_nps =
559 tstream_context_data(state->stream,
560 struct tstream_cli_np);
561
562 cli_nps->trans.read_req = NULL;
563
564 return 0;
565}
566
567static void tstream_cli_np_readv_read_next(struct tevent_req *req);
568
569static struct tevent_req *tstream_cli_np_readv_send(TALLOC_CTX *mem_ctx,
570 struct tevent_context *ev,
571 struct tstream_context *stream,
572 struct iovec *vector,
573 size_t count)
574{
575 struct tevent_req *req;
576 struct tstream_cli_np_readv_state *state;
577 struct tstream_cli_np *cli_nps =
578 tstream_context_data(stream, struct tstream_cli_np);
579
580 req = tevent_req_create(mem_ctx, &state,
581 struct tstream_cli_np_readv_state);
582 if (!req) {
583 return NULL;
584 }
585 state->stream = stream;
586 state->ev = ev;
587 state->ret = 0;
588
589 talloc_set_destructor(state, tstream_cli_np_readv_state_destructor);
590
591 if (!cli_state_is_connected(cli_nps->cli)) {
592 tevent_req_error(req, ENOTCONN);
593 return tevent_req_post(req, ev);
594 }
595
596 /*
597 * we make a copy of the vector so we can change the structure
598 */
599 state->vector = talloc_array(state, struct iovec, count);
600 if (tevent_req_nomem(state->vector, req)) {
601 return tevent_req_post(req, ev);
602 }
603 memcpy(state->vector, vector, sizeof(struct iovec) * count);
604 state->count = count;
605
606 tstream_cli_np_readv_read_next(req);
607 if (!tevent_req_is_in_progress(req)) {
608 return tevent_req_post(req, ev);
609 }
610
611 return req;
612}
613
static void tstream_cli_np_readv_read_done(struct tevent_req *subreq);

/*
 * Deliver already-buffered read data into the caller's iovecs; if more
 * is needed, either park the request for a combined SMBtrans (when a
 * trans cycle is armed or a writev is waiting) or issue a SMBreadX.
 */
static void tstream_cli_np_readv_read_next(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
		struct tstream_cli_np);
	struct tevent_req *subreq;

	/*
	 * copy the pending buffer first
	 */
	while (cli_nps->read.left > 0 && state->count > 0) {
		uint8_t *base = (uint8_t *)state->vector[0].iov_base;
		size_t len = MIN(cli_nps->read.left, state->vector[0].iov_len);

		memcpy(base, cli_nps->read.buf + cli_nps->read.ofs, len);

		base += len;
		state->vector[0].iov_base = base;
		state->vector[0].iov_len -= len;

		cli_nps->read.ofs += len;
		cli_nps->read.left -= len;

		/* advance to the next iovec once this one is full */
		if (state->vector[0].iov_len == 0) {
			state->vector += 1;
			state->count -= 1;
		}

		state->ret += len;
	}

	if (cli_nps->read.left == 0) {
		/* buffer fully drained, release it */
		TALLOC_FREE(cli_nps->read.buf);
	}

	if (state->count == 0) {
		/* all iovecs satisfied */
		tevent_req_done(req);
		return;
	}

	if (cli_nps->trans.active) {
		/* trans armed: wait for the matching writev to arrive */
		cli_nps->trans.active = false;
		cli_nps->trans.read_req = req;
		return;
	}

	if (cli_nps->trans.write_req) {
		/* a writev is already parked: start the SMBtrans now */
		cli_nps->trans.read_req = req;
		tstream_cli_np_readv_trans_start(req);
		return;
	}

	subreq = cli_read_andx_send(state, state->ev, cli_nps->cli,
				    cli_nps->fnum, 0, TSTREAM_CLI_NP_MAX_BUF_SIZE);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_readv_read_done,
				req);
}
680
static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq);

/*
 * Issue the combined write+read as a single SMBtrans
 * (TRANSACT_DCERPCCMD): the parked writev's buffer is the trans data,
 * the response payload feeds this readv.
 */
static void tstream_cli_np_readv_trans_start(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
		struct tstream_cli_np);
	struct tevent_req *subreq;

	/* allocated up front so the completion path cannot fail to
	 * schedule the deferred readv continuation */
	state->trans.im = tevent_create_immediate(state);
	if (tevent_req_nomem(state->trans.im, req)) {
		return;
	}

	subreq = cli_trans_send(state, state->ev,
				cli_nps->cli,
				SMBtrans,
				"\\PIPE\\",
				0, 0, 0,
				cli_nps->trans.setup, 2,
				0,
				NULL, 0, 0,
				cli_nps->write.buf,
				cli_nps->write.ofs,
				TSTREAM_CLI_NP_MAX_BUF_SIZE);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_readv_trans_done,
				req);
}
716
static void tstream_cli_np_readv_disconnect_now(struct tevent_req *req,
						int error,
						const char *location);
static void tstream_cli_np_readv_trans_next(struct tevent_context *ctx,
					    struct tevent_immediate *im,
					    void *private_data);

/*
 * SMBtrans completed: stash the received payload as the stream's read
 * buffer, then complete the parked writev and continue the readv.
 */
static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	uint8_t *rcvbuf;
	uint32_t received;
	NTSTATUS status;

	status = cli_trans_recv(subreq, state, NULL, NULL, 0, NULL,
				NULL, 0, NULL,
				&rcvbuf, 0, &received);
	TALLOC_FREE(subreq);
	if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
		/* more fragment data follows; later reads will fetch it */
		status = NT_STATUS_OK;
	}
	if (!NT_STATUS_IS_OK(status)) {
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received > TSTREAM_CLI_NP_MAX_BUF_SIZE) {
		/* server sent more than we asked for: protocol violation */
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received == 0) {
		/* zero-byte message: treat the pipe as broken */
		tstream_cli_np_readv_disconnect_now(req, EPIPE, __location__);
		return;
	}

	cli_nps->read.ofs = 0;
	cli_nps->read.left = received;
	cli_nps->read.buf = talloc_move(cli_nps, &rcvbuf);

	if (cli_nps->trans.write_req == NULL) {
		tstream_cli_np_readv_read_next(req);
		return;
	}

	/*
	 * Defer the readv continuation via an immediate event so the
	 * parked writev below can be completed first; completing the
	 * writev may trigger its callback, which must not run inside
	 * the readv continuation.
	 */
	tevent_schedule_immediate(state->trans.im, state->ev,
				  tstream_cli_np_readv_trans_next, req);

	tevent_req_done(cli_nps->trans.write_req);
}
772
773static void tstream_cli_np_readv_trans_next(struct tevent_context *ctx,
774 struct tevent_immediate *im,
775 void *private_data)
776{
777 struct tevent_req *req =
778 talloc_get_type_abort(private_data,
779 struct tevent_req);
780
781 tstream_cli_np_readv_read_next(req);
782}
783
/*
 * SMBreadX completed: copy the received payload into a stream-owned
 * buffer and continue delivering it to the caller's iovecs.
 */
static void tstream_cli_np_readv_read_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	uint8_t *rcvbuf;
	ssize_t received;
	NTSTATUS status;

	/*
	 * We must free subreq in this function as there is
	 * a timer event attached to it.
	 */

	status = cli_read_andx_recv(subreq, &received, &rcvbuf);
	/*
	 * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
	 * child of that.
	 */
	if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
		/*
		 * NT_STATUS_BUFFER_TOO_SMALL means that there's
		 * more data to read when the named pipe is used
		 * in message mode (which is the case here).
		 *
		 * But we hide this from the caller.
		 */
		status = NT_STATUS_OK;
	}
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received > TSTREAM_CLI_NP_MAX_BUF_SIZE) {
		/* server sent more than we asked for: protocol violation */
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received == 0) {
		/* zero-byte message: treat the pipe as broken */
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EPIPE, __location__);
		return;
	}

	cli_nps->read.ofs = 0;
	cli_nps->read.left = received;
	/* copy out of rcvbuf before freeing subreq, which owns it */
	cli_nps->read.buf = talloc_array(cli_nps, uint8_t, received);
	if (cli_nps->read.buf == NULL) {
		TALLOC_FREE(subreq);
		tevent_req_nomem(cli_nps->read.buf, req);
		return;
	}
	memcpy(cli_nps->read.buf, rcvbuf, received);
	TALLOC_FREE(subreq);

	tstream_cli_np_readv_read_next(req);
}
847
848static void tstream_cli_np_readv_disconnect_done(struct tevent_req *subreq);
849
850static void tstream_cli_np_readv_error(struct tevent_req *req);
851
852static void tstream_cli_np_readv_disconnect_now(struct tevent_req *req,
853 int error,
854 const char *location)
855{
856 struct tstream_cli_np_readv_state *state =
857 tevent_req_data(req,
858 struct tstream_cli_np_readv_state);
859 struct tstream_cli_np *cli_nps =
860 tstream_context_data(state->stream,
861 struct tstream_cli_np);
862 struct tevent_req *subreq;
863
864 state->error.val = error;
865 state->error.location = location;
866
867 if (!cli_state_is_connected(cli_nps->cli)) {
868 /* return the original error */
869 tstream_cli_np_readv_error(req);
870 return;
871 }
872
873 subreq = cli_close_send(state, state->ev, cli_nps->cli, cli_nps->fnum);
874 if (subreq == NULL) {
875 /* return the original error */
876 tstream_cli_np_readv_error(req);
877 return;
878 }
879 tevent_req_set_callback(subreq,
880 tstream_cli_np_readv_disconnect_done,
881 req);
882}
883
884static void tstream_cli_np_readv_disconnect_done(struct tevent_req *subreq)
885{
886 struct tevent_req *req =
887 tevent_req_callback_data(subreq, struct tevent_req);
888 struct tstream_cli_np_readv_state *state =
889 tevent_req_data(req, struct tstream_cli_np_readv_state);
890 struct tstream_cli_np *cli_nps =
891 tstream_context_data(state->stream, struct tstream_cli_np);
892 NTSTATUS status;
893
894 status = cli_close_recv(subreq);
895 TALLOC_FREE(subreq);
896
897 cli_nps->cli = NULL;
898
899 tstream_cli_np_readv_error(req);
900}
901
static void tstream_cli_np_readv_error_trigger(struct tevent_context *ctx,
					       struct tevent_immediate *im,
					       void *private_data);

/*
 * Fail this readv with the error stored in state->error.
 *
 * If a writev is also parked on the shared SMBtrans, the writev is
 * failed synchronously and the readv failure is deferred via the
 * pre-allocated immediate event, so both requests see the error
 * without one callback running inside the other.
 */
static void tstream_cli_np_readv_error(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
		struct tstream_cli_np);

	if (cli_nps->trans.write_req == NULL) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}

	if (state->trans.im == NULL) {
		/* no immediate available (trans never started):
		 * return the original error directly */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}

	tevent_schedule_immediate(state->trans.im, state->ev,
				  tstream_cli_np_readv_error_trigger, req);

	/* return the original error for writev */
	_tevent_req_error(cli_nps->trans.write_req,
			  state->error.val, state->error.location);
}
934
935static void tstream_cli_np_readv_error_trigger(struct tevent_context *ctx,
936 struct tevent_immediate *im,
937 void *private_data)
938{
939 struct tevent_req *req =
940 talloc_get_type_abort(private_data,
941 struct tevent_req);
942 struct tstream_cli_np_readv_state *state =
943 tevent_req_data(req,
944 struct tstream_cli_np_readv_state);
945
946 /* return the original error */
947 _tevent_req_error(req, state->error.val, state->error.location);
948}
949
950static int tstream_cli_np_readv_recv(struct tevent_req *req,
951 int *perrno)
952{
953 struct tstream_cli_np_readv_state *state =
954 tevent_req_data(req, struct tstream_cli_np_readv_state);
955 int ret;
956
957 ret = tsocket_simple_int_recv(req, perrno);
958 if (ret == 0) {
959 ret = state->ret;
960 }
961
962 tevent_req_received(req);
963 return ret;
964}
965
/* Async state for the disconnect operation. */
struct tstream_cli_np_disconnect_state {
	struct tstream_context *stream;	/* stream being torn down */
};
969
970static void tstream_cli_np_disconnect_done(struct tevent_req *subreq);
971
972static struct tevent_req *tstream_cli_np_disconnect_send(TALLOC_CTX *mem_ctx,
973 struct tevent_context *ev,
974 struct tstream_context *stream)
975{
976 struct tstream_cli_np *cli_nps = tstream_context_data(stream,
977 struct tstream_cli_np);
978 struct tevent_req *req;
979 struct tstream_cli_np_disconnect_state *state;
980 struct tevent_req *subreq;
981
982 req = tevent_req_create(mem_ctx, &state,
983 struct tstream_cli_np_disconnect_state);
984 if (req == NULL) {
985 return NULL;
986 }
987
988 state->stream = stream;
989
990 if (!cli_state_is_connected(cli_nps->cli)) {
991 tevent_req_error(req, ENOTCONN);
992 return tevent_req_post(req, ev);
993 }
994
995 subreq = cli_close_send(state, ev, cli_nps->cli, cli_nps->fnum);
996 if (tevent_req_nomem(subreq, req)) {
997 return tevent_req_post(req, ev);
998 }
999 tevent_req_set_callback(subreq, tstream_cli_np_disconnect_done, req);
1000
1001 return req;
1002}
1003
1004static void tstream_cli_np_disconnect_done(struct tevent_req *subreq)
1005{
1006 struct tevent_req *req = tevent_req_callback_data(subreq,
1007 struct tevent_req);
1008 struct tstream_cli_np_disconnect_state *state =
1009 tevent_req_data(req, struct tstream_cli_np_disconnect_state);
1010 struct tstream_cli_np *cli_nps =
1011 tstream_context_data(state->stream, struct tstream_cli_np);
1012 NTSTATUS status;
1013
1014 status = cli_close_recv(subreq);
1015 TALLOC_FREE(subreq);
1016 if (!NT_STATUS_IS_OK(status)) {
1017 tevent_req_error(req, EIO);
1018 return;
1019 }
1020
1021 cli_nps->cli = NULL;
1022
1023 tevent_req_done(req);
1024}
1025
/* tstream op: collect the disconnect result (0 or -1/errno). */
static int tstream_cli_np_disconnect_recv(struct tevent_req *req,
					  int *perrno)
{
	int ret = tsocket_simple_int_recv(req, perrno);

	tevent_req_received(req);
	return ret;
}
1036
/* tstream backend vtable for SMB named pipes on a cli_state. */
static const struct tstream_context_ops tstream_cli_np_ops = {
	.name = "cli_np",

	.pending_bytes = tstream_cli_np_pending_bytes,

	.readv_send = tstream_cli_np_readv_send,
	.readv_recv = tstream_cli_np_readv_recv,

	.writev_send = tstream_cli_np_writev_send,
	.writev_recv = tstream_cli_np_writev_recv,

	.disconnect_send = tstream_cli_np_disconnect_send,
	.disconnect_recv = tstream_cli_np_disconnect_recv,
};
1051
1052NTSTATUS _tstream_cli_np_existing(TALLOC_CTX *mem_ctx,
1053 struct cli_state *cli,
1054 uint16_t fnum,
1055 struct tstream_context **_stream,
1056 const char *location)
1057{
1058 struct tstream_context *stream;
1059 struct tstream_cli_np *cli_nps;
1060
1061 stream = tstream_context_create(mem_ctx,
1062 &tstream_cli_np_ops,
1063 &cli_nps,
1064 struct tstream_cli_np,
1065 location);
1066 if (!stream) {
1067 return NT_STATUS_NO_MEMORY;
1068 }
1069 ZERO_STRUCTP(cli_nps);
1070
1071 cli_nps->cli = cli;
1072 cli_nps->fnum = fnum;
1073
1074 *_stream = stream;
1075 return NT_STATUS_OK;
1076}
Note: See TracBrowser for help on using the repository browser.