varnish-cache/vmod/vmod_debug_transports.c
0
/*-
1
 * Copyright (c) 2006 Verdens Gang AS
2
 * Copyright (c) 2006-2015 Varnish Software AS
3
 * Copyright 2024 UPLEX - Nils Goroll Systemoptimierung
4
 * All rights reserved.
5
 *
6
 * Authors: Poul-Henning Kamp <phk@phk.freebsd.dk>
7
 *          Nils Goroll <slink@uplex.de>
8
 *
9
 * SPDX-License-Identifier: BSD-2-Clause
10
 *
11
 * Redistribution and use in source and binary forms, with or without
12
 * modification, are permitted provided that the following conditions
13
 * are met:
14
 * 1. Redistributions of source code must retain the above copyright
15
 *    notice, this list of conditions and the following disclaimer.
16
 * 2. Redistributions in binary form must reproduce the above copyright
17
 *    notice, this list of conditions and the following disclaimer in the
18
 *    documentation and/or other materials provided with the distribution.
19
 *
20
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
24
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30
 * SUCH DAMAGE.
31
 */
32
33
#include "config.h"
34
35
#include "cache/cache_varnishd.h"
36
37
#include "cache/cache_filter.h"
38
#include "cache/cache_transport.h"
39
#include "http1/cache_http1.h"
40
41
#include "vmod_debug.h"
42
43
/*
 * Error path for the debug transport. Deliberately unimplemented:
 * reaching it aborts the child process via INCOMPL().
 */
static void
dbg_error(struct req *req, struct v1l **v1lp, const char *msg)
{

	(void)msg;
	(void)v1lp;
	(void)req;
	INCOMPL();
}
52
53
static void dbg_deliver_finish(struct req *req, struct v1l **v1lp, int err);
54
static void dbg_sendbody(struct worker *wrk, void *arg);
55
56
static task_func_t *hack_http1_req = NULL;
57
58
// copied from cache_http_deliver.c, then split & modified
59
static enum vtr_deliver_e v_matchproto_(vtr_deliver_f)
60 160
dbg_deliver(struct req *req, int sendbody)
61
{
62
        struct vrt_ctx ctx[1];
63
        struct v1l *v1l;
64
65 160
        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
66 160
        CHECK_OBJ_ORNULL(req->boc, BOC_MAGIC);
67 160
        CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);
68
69 160
        if (req->doclose == SC_NULL &&
70 160
            http_HdrIs(req->resp, H_Connection, "close")) {
71 0
                req->doclose = SC_RESP_CLOSE;
72 160
        } else if (req->doclose != SC_NULL) {
73 0
                if (!http_HdrIs(req->resp, H_Connection, "close")) {
74 0
                        http_Unset(req->resp, H_Connection);
75 0
                        http_SetHeader(req->resp, "Connection: close");
76 0
                }
77 160
        } else if (!http_GetHdr(req->resp, H_Connection, NULL))
78 160
                http_SetHeader(req->resp, "Connection: keep-alive");
79
80 160
        CHECK_OBJ_NOTNULL(req->wrk, WORKER_MAGIC);
81
82 320
        v1l = V1L_Open(req->ws, &req->sp->fd, req->vsl,
83 160
            req->t_prev + SESS_TMO(req->sp, send_timeout),
84 160
            cache_param->http1_iovs);
85
86 160
        if (v1l == NULL) {
87 0
                dbg_error(req, &v1l, "Failure to init v1d (workspace_thread overflow)");
88 0
                return (VTR_D_DONE);
89
        }
90
91 160
        if (sendbody) {
92 160
                if (!http_GetHdr(req->resp, H_Content_Length, NULL)) {
93 80
                        if (req->http->protover == 11) {
94 80
                                http_SetHeader(req->resp,
95
                                    "Transfer-Encoding: chunked");
96 80
                        } else {
97 0
                                req->doclose = SC_TX_EOF;
98
                        }
99 80
                }
100 160
                INIT_OBJ(ctx, VRT_CTX_MAGIC);
101 160
                VCL_Req2Ctx(ctx, req);
102 160
                if (VDP_Push(ctx, req->vdc, req->ws, VDP_v1l, v1l)) {
103 0
                        dbg_error(req, &v1l, "Failure to push v1d processor");
104 0
                        return (VTR_D_DONE);
105
                }
106 160
        }
107
108 160
        if (WS_Overflowed(req->ws)) {
109 0
                dbg_error(req, &v1l, "workspace_client overflow");
110 0
                return (VTR_D_DONE);
111
        }
112
113 160
        if (WS_Overflowed(req->sp->ws)) {
114 0
                dbg_error(req, &v1l, "workspace_session overflow");
115 0
                return (VTR_D_DONE);
116
        }
117
118 160
        if (WS_Overflowed(req->wrk->aws)) {
119 0
                dbg_error(req, &v1l, "workspace_thread overflow");
120 0
                return (VTR_D_DONE);
121
        }
122
123 160
        req->acct.resp_hdrbytes += HTTP1_Write(v1l, req->resp, HTTP1_Resp);
124
125 160
        if (! sendbody) {
126 0
                dbg_deliver_finish(req, &v1l, 0);
127 0
                return (VTR_D_DONE);
128
        }
129
130 160
        (void)V1L_Flush(v1l);
131
132 160
        if (hack_http1_req == NULL)
133 5
                hack_http1_req = req->task->func;
134 160
        AN(hack_http1_req);
135
136 160
        VSLb(req->vsl, SLT_Debug, "w=%p scheduling dbg_sendbody", req->wrk);
137
138 160
        req->task->func = dbg_sendbody;
139 160
        req->task->priv = req;
140
141 160
        req->wrk = NULL;
142 160
        req->vdc->wrk = NULL;
143 160
        req->transport_priv = v1l;
144
145 160
        AZ(Pool_Task(req->sp->pool, req->task, TASK_QUEUE_RUSH));
146 160
        return (VTR_D_DISEMBARK);
147 160
}
148
149
static void v_matchproto_(task_func_t)
150 160
dbg_sendbody(struct worker *wrk, void *arg)
151
{
152
        struct req *req;
153
        struct v1l *v1l;
154
        const char *p;
155
        int err, chunked;
156
157 160
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
158 160
        CAST_OBJ_NOTNULL(req, arg, REQ_MAGIC);
159 160
        v1l = req->transport_priv;
160 160
        req->transport_priv = NULL;
161 160
        AN(v1l);
162
163 160
        THR_SetRequest(req);
164 160
        VSLb(req->vsl, SLT_Debug, "w=%p enter dbg_sendbody", wrk);
165 160
        AZ(req->wrk);
166 160
        CNT_Embark(wrk, req);
167 160
        req->vdc->wrk = wrk;    // move to CNT_Embark?
168
169 160
        chunked = http_GetHdr(req->resp, H_Transfer_Encoding, &p) && strcmp(p, "chunked") == 0;
170 160
        if (chunked)
171 80
                V1L_Chunked(v1l);
172 160
        err = VDP_DeliverObj(req->vdc, req->objcore);
173 160
        if (!err && chunked)
174 80
                V1L_EndChunk(v1l);
175 160
        dbg_deliver_finish(req, &v1l, err);
176
177 160
        VSLb(req->vsl, SLT_Debug, "w=%p resuming http1_req", wrk);
178 160
        wrk->task->func = hack_http1_req;
179 160
        wrk->task->priv = req;
180 160
}
181
182
static void
183 160
dbg_deliver_finish(struct req *req, struct v1l **v1lp, int err)
184
{
185
        stream_close_t sc;
186
        uint64_t bytes;
187
188 160
        sc = V1L_Close(v1lp, &bytes);
189
190 160
        req->acct.resp_bodybytes += VDP_Close(req->vdc, req->objcore, req->boc);
191
192 160
        if (sc == SC_NULL && err && req->sp->fd >= 0)
193 0
                sc = SC_REM_CLOSE;
194 160
        if (sc != SC_NULL)
195 0
                Req_Fail(req, sc);
196 160
}
197
198
static struct transport DBG_transport;
199
200
void
201 490
debug_transport_init(void)
202
{
203 490
        DBG_transport = HTTP1_transport;
204 490
        DBG_transport.name = "DBG";
205 490
        DBG_transport.deliver = dbg_deliver;
206 490
}
207
208
void
209 160
debug_transport_use_reembarking_http1(VRT_CTX)
210
{
211
        struct req *req;
212
213 160
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
214 160
        req = ctx->req;
215 160
        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
216
217 160
        if (req->transport != &HTTP1_transport) {
218 0
                VRT_fail(ctx, "Only works on built-in http1 transport");
219 0
                return;
220
        }
221 160
        AZ(req->transport_priv);
222 160
        req->transport = &DBG_transport;
223 160
}