varnish-cache/vmod/vmod_debug_transports.c
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * Copyright 2024 UPLEX - Nils Goroll Systemoptimierung
 * All rights reserved.
 *
 * Authors: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *          Nils Goroll <slink@uplex.de>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "config.h"

#include "cache/cache_varnishd.h"

#include "cache/cache_filter.h"
#include "cache/cache_transport.h"
#include "http1/cache_http1.h"

#include "vmod_debug.h"

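/*
 * Stub error path: this debug transport does not implement delivery error
 * handling, so INCOMPL() aborts if any of the error branches below is hit.
 */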
static void
dbg_error(struct req *req, struct v1l **v1lp, const char *msg)
{

        (void)req;
        (void)v1lp;
        (void)msg;
        INCOMPL();
}

static void dbg_deliver_finish(struct req *req, struct v1l **v1lp, int err);
static void dbg_sendbody(struct worker *wrk, void *arg);

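/*
 * First-use snapshot of the task function driving built-in HTTP/1 requests;
 * dbg_sendbody() restores it to hand the request back to the regular HTTP/1
 * request handling.
 */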
static task_func_t *hack_http1_req = NULL;

// copied from cache_http_deliver.c, then split & modified
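// Deliver the status line and headers on the calling worker; if a body is to
// be sent, re-queue the request as dbg_sendbody() and disembark, so the body
// may be delivered by a (possibly different) worker.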
static enum vtr_deliver_e v_matchproto_(vtr_deliver_f)
dbg_deliver(struct req *req, int sendbody)
{
        struct vrt_ctx ctx[1];
        struct v1l *v1l;

        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
        CHECK_OBJ_ORNULL(req->boc, BOC_MAGIC);
        CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);

        if (req->doclose == SC_NULL &&
            http_HdrIs(req->resp, H_Connection, "close")) {
                req->doclose = SC_RESP_CLOSE;
        } else if (req->doclose != SC_NULL) {
                if (!http_HdrIs(req->resp, H_Connection, "close")) {
                        http_Unset(req->resp, H_Connection);
                        http_SetHeader(req->resp, "Connection: close");
                }
        } else if (!http_GetHdr(req->resp, H_Connection, NULL))
                http_SetHeader(req->resp, "Connection: keep-alive");

        CHECK_OBJ_NOTNULL(req->wrk, WORKER_MAGIC);

        v1l = V1L_Open(req->ws, &req->sp->fd, req->vsl,
            req->t_prev + SESS_TMO(req->sp, send_timeout),
            cache_param->http1_iovs);

        if (v1l == NULL) {
                dbg_error(req, &v1l, "Failure to init v1d (workspace_thread overflow)");
                return (VTR_D_DONE);
        }

        if (sendbody) {
                if (!http_GetHdr(req->resp, H_Content_Length, NULL)) {
                        if (req->http->protover == 11) {
                                http_SetHeader(req->resp,
                                    "Transfer-Encoding: chunked");
                        } else {
                                req->doclose = SC_TX_EOF;
                        }
                }
                INIT_OBJ(ctx, VRT_CTX_MAGIC);
                VCL_Req2Ctx(ctx, req);
                if (VDP_Push(ctx, req->vdc, req->ws, VDP_v1l, v1l)) {
                        dbg_error(req, &v1l, "Failure to push v1d processor");
                        return (VTR_D_DONE);
                }
        }

        if (WS_Overflowed(req->ws)) {
                dbg_error(req, &v1l, "workspace_client overflow");
                return (VTR_D_DONE);
        }

        if (WS_Overflowed(req->sp->ws)) {
                dbg_error(req, &v1l, "workspace_session overflow");
                return (VTR_D_DONE);
        }

        if (WS_Overflowed(req->wrk->aws)) {
                dbg_error(req, &v1l, "workspace_thread overflow");
                return (VTR_D_DONE);
        }

        req->acct.resp_hdrbytes += HTTP1_Write(v1l, req->resp, HTTP1_Resp);

        if (! sendbody) {
                dbg_deliver_finish(req, &v1l, 0);
                return (VTR_D_DONE);
        }

        (void)V1L_Flush(v1l);

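        /*
         * Headers are on the wire. Remember the task function that has been
         * driving this request (the built-in HTTP/1 request task), then hand
         * the request over to dbg_sendbody() on a fresh pool task and
         * disembark from the current worker.
         */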
        if (hack_http1_req == NULL)
                hack_http1_req = req->task->func;
        AN(hack_http1_req);

        VSLb(req->vsl, SLT_Debug, "w=%p scheduling dbg_sendbody", req->wrk);

        req->task->func = dbg_sendbody;
        req->task->priv = req;

        req->wrk = NULL;
        req->vdc->wrk = NULL;
        req->transport_priv = v1l;

        AZ(Pool_Task(req->sp->pool, req->task, TASK_QUEUE_RUSH));
        return (VTR_D_DISEMBARK);
}

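/*
 * Pool task continuing delivery after dbg_deliver() disembarked: re-embark
 * the request on the worker running this task, send the (possibly chunked)
 * body, then resume the normal HTTP/1 request task.
 */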
static void v_matchproto_(task_func_t)
dbg_sendbody(struct worker *wrk, void *arg)
{
        struct req *req;
        struct v1l *v1l;
        const char *p;
        int err, chunked;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CAST_OBJ_NOTNULL(req, arg, REQ_MAGIC);
        v1l = req->transport_priv;
        req->transport_priv = NULL;
        AN(v1l);

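        /* Attach the request to the worker running this task (the counterpart
         * of the disembark in dbg_deliver()). */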
        THR_SetRequest(req);
        VSLb(req->vsl, SLT_Debug, "w=%p enter dbg_sendbody", wrk);
        AZ(req->wrk);
        CNT_Embark(wrk, req);
        req->vdc->wrk = wrk;    // move to CNT_Embark?

        chunked = http_GetHdr(req->resp, H_Transfer_Encoding, &p) && strcmp(p, "chunked") == 0;
        if (chunked)
                V1L_Chunked(v1l);
        err = VDP_DeliverObj(req->vdc, req->objcore);
        if (!err && chunked)
                V1L_EndChunk(v1l);
        dbg_deliver_finish(req, &v1l, err);

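        /* Hand back to the regular HTTP/1 request task: the worker picks up
         * wrk->task again once this task function returns. */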
        VSLb(req->vsl, SLT_Debug, "w=%p resuming http1_req", wrk);
        wrk->task->func = hack_http1_req;
        wrk->task->priv = req;
}

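/*
 * Common tail for both the body and the no-body path: close the V1L and the
 * delivery processors, account the body bytes, and fail the request if the
 * write-out did not complete cleanly.
 */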
static void
dbg_deliver_finish(struct req *req, struct v1l **v1lp, int err)
{
        stream_close_t sc;
        uint64_t bytes;

        sc = V1L_Close(v1lp, &bytes);

        req->acct.resp_bodybytes += VDP_Close(req->vdc, req->objcore, req->boc);

        if (sc == SC_NULL && err && req->sp->fd >= 0)
                sc = SC_REM_CLOSE;
        if (sc != SC_NULL)
                Req_Fail(req, sc);
}

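/* A mutable copy of the built-in HTTP/1 transport with deliver replaced. */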
static struct transport DBG_transport;
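/* Set up the DBG transport as a clone of HTTP/1 with our deliver callback. */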
void
debug_transport_init(void)
{
        DBG_transport = HTTP1_transport;
        DBG_transport.name = "DBG";
        DBG_transport.deliver = dbg_deliver;
}

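/*
 * VMOD entry point: switch the current request from the built-in HTTP/1
 * transport to the DBG transport, so the upcoming delivery goes through
 * dbg_deliver() above.
 */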
void
debug_transport_use_reembarking_http1(VRT_CTX)
{
        struct req *req;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        req = ctx->req;
        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

        if (req->transport != &HTTP1_transport) {
                VRT_fail(ctx, "Only works on built-in http1 transport");
                return;
        }
        AZ(req->transport_priv);
        req->transport = &DBG_transport;
}