summaryrefslogtreecommitdiff
path: root/fs/iomap/bio.c
blob: edd908183058f5de697be7e9df47096262c424a1 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2023 Christoph Hellwig.
 */
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include "internal.h"
#include "trace.h"

/*
 * Bios for failed buffered reads are parked on this list (under
 * failed_read_lock, taken with IRQs disabled since bios complete in
 * interrupt context) and drained from process context by a worker.
 */
static DEFINE_SPINLOCK(failed_read_lock);
static struct bio_list failed_read_list = BIO_EMPTY_LIST;

static void __iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
	bio_put(bio);
}

static void
iomap_fail_reads(
	struct work_struct	*work)
{
	struct bio		*bio;
	struct bio_list		tmp = BIO_EMPTY_LIST;
	unsigned long		flags;

	spin_lock_irqsave(&failed_read_lock, flags);
	bio_list_merge_init(&tmp, &failed_read_list);
	spin_unlock_irqrestore(&failed_read_lock, flags);

	while ((bio = bio_list_pop(&tmp)) != NULL) {
		__iomap_read_end_io(bio);
		cond_resched();
	}
}

static DECLARE_WORK(failed_read_work, iomap_fail_reads);

/*
 * Defer completion of a failed buffered read bio to process context.
 * May be called from interrupt context, hence the irqsave locking.
 */
static void iomap_fail_buffered_read(struct bio *bio)
{
	unsigned long flags;

	/*
	 * Bounce I/O errors to a workqueue to avoid nested i_lock acquisitions
	 * in the fserror code.  The caller no longer owns the bio reference
	 * after the spinlock drops.
	 */
	spin_lock_irqsave(&failed_read_lock, flags);
	/*
	 * Only kick the worker on the empty->non-empty transition; a
	 * non-empty list means the work is already scheduled (or running
	 * and about to drain this addition).  The ordering is safe because
	 * the worker takes failed_read_lock before touching the list.
	 */
	if (bio_list_empty(&failed_read_list))
		WARN_ON_ONCE(!schedule_work(&failed_read_work));
	bio_list_add(&failed_read_list, bio);
	spin_unlock_irqrestore(&failed_read_lock, flags);
}

/*
 * bi_end_io handler for buffered read bios: complete successful bios
 * inline, defer failed ones to process context.
 */
static void iomap_read_end_io(struct bio *bio)
{
	if (!bio->bi_status) {
		__iomap_read_end_io(bio);
		return;
	}

	iomap_fail_buffered_read(bio);
}

/* Submit the bio accumulated in @ctx->read_ctx, if one was built. */
static void iomap_bio_submit_read(struct iomap_read_folio_ctx *ctx)
{
	struct bio *bio = ctx->read_ctx;

	if (!bio)
		return;
	submit_bio(bio);
}

/*
 * Queue a read of @plen bytes of @ctx->cur_folio at the iterator's
 * current position.  Contiguous ranges are batched into one bio held in
 * @ctx->read_ctx; a non-contiguous sector or a full bio forces submission
 * of the pending bio and allocation of a new one.  Always returns 0.
 */
static int iomap_bio_read_folio_range(const struct iomap_iter *iter,
		struct iomap_read_folio_ctx *ctx, size_t plen)
{
	struct folio *folio = ctx->cur_folio;
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos;
	size_t poff = offset_in_folio(folio, pos);
	loff_t length = iomap_length(iter);
	sector_t sector;
	struct bio *bio = ctx->read_ctx;

	sector = iomap_sector(iomap, pos);
	/*
	 * Reuse the pending bio only if this range starts exactly where it
	 * ends on disk and the folio still fits; otherwise start a new bio.
	 */
	if (!bio || bio_end_sector(bio) != sector ||
	    !bio_add_folio(bio, folio, plen, poff)) {
		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		if (bio)
			submit_bio(bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ,
				     gfp);
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_read_folio does.
		 */
		if (!bio)
			bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
		if (ctx->rac)
			bio->bi_opf |= REQ_RAHEAD;
		bio->bi_iter.bi_sector = sector;
		bio->bi_end_io = iomap_read_end_io;
		/* Fresh bio with >= 1 vec; adding one folio cannot fail. */
		bio_add_folio_nofail(bio, folio, plen, poff);
		ctx->read_ctx = bio;
	}
	return 0;
}

/* Default buffered-read ops backed by the block layer (struct bio). */
const struct iomap_read_ops iomap_bio_read_ops = {
	.read_folio_range = iomap_bio_read_folio_range,
	.submit_read = iomap_bio_submit_read,
};
EXPORT_SYMBOL_GPL(iomap_bio_read_ops);

/*
 * Synchronously read @len bytes at file offset @pos into @folio, using
 * an on-stack bio with a single vector against the source mapping's
 * block device.  Returns 0 on success or a negative errno.
 */
int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t len)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	size_t folio_off = offset_in_folio(folio, pos);
	struct bio_vec vec;
	struct bio sync_bio;

	bio_init(&sync_bio, srcmap->bdev, &vec, 1, REQ_OP_READ);
	sync_bio.bi_iter.bi_sector = iomap_sector(srcmap, pos);
	bio_add_folio_nofail(&sync_bio, folio, len, folio_off);
	return submit_bio_wait(&sync_bio);
}