Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/vmlinux.lds.h           |  19
-rw-r--r--  include/linux/acpi.h                        |   3
-rw-r--r--  include/linux/bio-integrity.h               | 152
-rw-r--r--  include/linux/bio.h                         | 156
-rw-r--r--  include/linux/blk-integrity.h               |   1
-rw-r--r--  include/linux/blk-mq.h                      | 127
-rw-r--r--  include/linux/blk_types.h                   |   1
-rw-r--r--  include/linux/blkdev.h                      |  31
-rw-r--r--  include/linux/cgroup-defs.h                 |   3
-rw-r--r--  include/linux/f2fs_fs.h                     |   7
-rw-r--r--  include/linux/hwspinlock.h                  |   6
-rw-r--r--  include/linux/i2c.h                         |  21
-rw-r--r--  include/linux/interrupt.h                   |   2
-rw-r--r--  include/linux/irq.h                         |  43
-rw-r--r--  include/linux/irqchip/arm-gic-v4.h          |   8
-rw-r--r--  include/linux/irqdomain.h                   | 136
-rw-r--r--  include/linux/module.h                      |   6
-rw-r--r--  include/linux/msi.h                         |  52
-rw-r--r--  include/linux/pagemap.h                     |   6
-rw-r--r--  include/linux/platform_data/i2c-mux-gpio.h  |   2
-rw-r--r--  include/linux/power_supply.h                |  19
-rw-r--r--  include/linux/sbitmap.h                     |   5
-rw-r--r--  include/linux/soc/qcom/smem.h               |   2
23 files changed, 466 insertions(+), 342 deletions(-)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 677315e51e54..ad6afc5c4918 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -443,29 +443,10 @@ #endif /* - * Some symbol definitions will not exist yet during the first pass of the - * link, but are guaranteed to exist in the final link. Provide preliminary - * definitions that will be superseded in the final link to avoid having to - * rely on weak external linkage, which requires a GOT when used in position - * independent code. - */ -#define PRELIMINARY_SYMBOL_DEFINITIONS \ - PROVIDE(kallsyms_addresses = .); \ - PROVIDE(kallsyms_offsets = .); \ - PROVIDE(kallsyms_names = .); \ - PROVIDE(kallsyms_num_syms = .); \ - PROVIDE(kallsyms_relative_base = .); \ - PROVIDE(kallsyms_token_table = .); \ - PROVIDE(kallsyms_token_index = .); \ - PROVIDE(kallsyms_markers = .); \ - PROVIDE(kallsyms_seqs_of_names = .); - -/* * Read only Data */ #define RO_DATA(align) \ . = ALIGN((align)); \ - PRELIMINARY_SYMBOL_DEFINITIONS \ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ __start_rodata = .; \ *(.rodata) *(.rodata.*) \ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index e93059f71c71..f0b95c76c707 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -274,6 +274,9 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id) return phys_id == PHYS_CPUID_INVALID; } + +int __init acpi_get_madt_revision(void); + /* Validate the processor object's proc_id */ bool acpi_duplicate_processor_id(int proc_id); /* Processor _CTS control */ diff --git a/include/linux/bio-integrity.h b/include/linux/bio-integrity.h new file mode 100644 index 000000000000..dd831c269e99 --- /dev/null +++ b/include/linux/bio-integrity.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BIO_INTEGRITY_H +#define _LINUX_BIO_INTEGRITY_H + +#include <linux/bio.h> + +enum bip_flags { + BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */ + BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */ + BIP_CTRL_NOCHECK = 1 << 2, /* disable HBA integrity checking */ + BIP_DISK_NOCHECK = 1 << 3, /* disable disk integrity checking */ + BIP_IP_CHECKSUM = 1 << 4, /* IP checksum */ + BIP_COPY_USER = 1 << 5, /* Kernel bounce buffer in use */ +}; + +struct bio_integrity_payload { + struct bio *bip_bio; /* parent bio */ + + struct bvec_iter bip_iter; + + unsigned short bip_vcnt; /* # of integrity bio_vecs */ + unsigned short bip_max_vcnt; /* integrity bio_vec slots */ + unsigned short bip_flags; /* control flags */ + + struct bvec_iter bio_iter; /* for rewinding parent bio */ + + struct work_struct bip_work; /* I/O completion */ + + struct bio_vec *bip_vec; + struct bio_vec bip_inline_vecs[];/* embedded bvec array */ +}; + +#ifdef CONFIG_BLK_DEV_INTEGRITY + +#define bip_for_each_vec(bvl, bip, iter) \ + for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter) + +#define bio_for_each_integrity_vec(_bvl, _bio, _iter) \ + for_each_bio(_bio) \ + bip_for_each_vec(_bvl, _bio->bi_integrity, _iter) + +static inline struct bio_integrity_payload *bio_integrity(struct bio *bio) +{ + if (bio->bi_opf & REQ_INTEGRITY) + return bio->bi_integrity; + + return NULL; +} + +static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag) +{ + struct bio_integrity_payload *bip = bio_integrity(bio); + + if (bip) + return bip->bip_flags & flag; + + return false; +} + +static inline sector_t bip_get_seed(struct bio_integrity_payload *bip) +{ + return bip->bip_iter.bi_sector; 
+} + +static inline void bip_set_seed(struct bio_integrity_payload *bip, + sector_t seed) +{ + bip->bip_iter.bi_sector = seed; +} + +struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, gfp_t gfp, + unsigned int nr); +int bio_integrity_add_page(struct bio *bio, struct page *page, unsigned int len, + unsigned int offset); +int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len, u32 seed); +void bio_integrity_unmap_user(struct bio *bio); +bool bio_integrity_prep(struct bio *bio); +void bio_integrity_advance(struct bio *bio, unsigned int bytes_done); +void bio_integrity_trim(struct bio *bio); +int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask); +int bioset_integrity_create(struct bio_set *bs, int pool_size); +void bioset_integrity_free(struct bio_set *bs); +void bio_integrity_init(void); + +#else /* CONFIG_BLK_DEV_INTEGRITY */ + +static inline struct bio_integrity_payload *bio_integrity(struct bio *bio) +{ + return NULL; +} + +static inline int bioset_integrity_create(struct bio_set *bs, int pool_size) +{ + return 0; +} + +static inline void bioset_integrity_free(struct bio_set *bs) +{ +} + +static inline int bio_integrity_map_user(struct bio *bio, void __user *ubuf, + ssize_t len, u32 seed) +{ + return -EINVAL; +} + +static inline void bio_integrity_unmap_user(struct bio *bio) +{ +} + +static inline bool bio_integrity_prep(struct bio *bio) +{ + return true; +} + +static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src, + gfp_t gfp_mask) +{ + return 0; +} + +static inline void bio_integrity_advance(struct bio *bio, + unsigned int bytes_done) +{ +} + +static inline void bio_integrity_trim(struct bio *bio) +{ +} + +static inline void bio_integrity_init(void) +{ +} + +static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag) +{ + return false; +} + +static inline struct bio_integrity_payload * +bio_integrity_alloc(struct bio *bio, gfp_t gfp, unsigned int nr) +{ + return ERR_PTR(-EINVAL); +} + +static inline int bio_integrity_add_page(struct bio *bio, struct page *page, + unsigned int len, unsigned int offset) +{ + return 0; +} +#endif /* CONFIG_BLK_DEV_INTEGRITY */ +#endif /* _LINUX_BIO_INTEGRITY_H */ diff --git a/include/linux/bio.h b/include/linux/bio.h index 818e93612947..a46e2047bea4 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -321,69 +321,6 @@ static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio) #define bio_for_each_folio_all(fi, bio) \ for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio)) -enum bip_flags { - BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */ - BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */ - BIP_CTRL_NOCHECK = 1 << 2, /* disable HBA integrity checking */ - BIP_DISK_NOCHECK = 1 << 3, /* disable disk integrity checking */ - BIP_IP_CHECKSUM = 1 << 4, /* IP checksum */ - BIP_INTEGRITY_USER = 1 << 5, /* Integrity payload is user address */ - BIP_COPY_USER = 1 << 6, /* Kernel bounce buffer in use */ -}; - -/* - * bio integrity payload - */ -struct bio_integrity_payload { - struct bio *bip_bio; /* parent bio */ - - struct bvec_iter bip_iter; - - unsigned short bip_vcnt; /* # of integrity bio_vecs */ - unsigned short bip_max_vcnt; /* integrity bio_vec slots */ - unsigned short bip_flags; /* control flags */ - - struct bvec_iter bio_iter; /* for rewinding parent bio */ - - struct work_struct bip_work; /* I/O completion */ - - struct bio_vec *bip_vec; - struct bio_vec bip_inline_vecs[];/* 
embedded bvec array */ -}; - -#if defined(CONFIG_BLK_DEV_INTEGRITY) - -static inline struct bio_integrity_payload *bio_integrity(struct bio *bio) -{ - if (bio->bi_opf & REQ_INTEGRITY) - return bio->bi_integrity; - - return NULL; -} - -static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag) -{ - struct bio_integrity_payload *bip = bio_integrity(bio); - - if (bip) - return bip->bip_flags & flag; - - return false; -} - -static inline sector_t bip_get_seed(struct bio_integrity_payload *bip) -{ - return bip->bip_iter.bi_sector; -} - -static inline void bip_set_seed(struct bio_integrity_payload *bip, - sector_t seed) -{ - bip->bip_iter.bi_sector = seed; -} - -#endif /* CONFIG_BLK_DEV_INTEGRITY */ - void bio_trim(struct bio *bio, sector_t offset, sector_t size); extern struct bio *bio_split(struct bio *bio, int sectors, gfp_t gfp, struct bio_set *bs); @@ -721,99 +658,6 @@ static inline bool bioset_initialized(struct bio_set *bs) return bs->bio_slab != NULL; } -#if defined(CONFIG_BLK_DEV_INTEGRITY) - -#define bip_for_each_vec(bvl, bip, iter) \ - for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter) - -#define bio_for_each_integrity_vec(_bvl, _bio, _iter) \ - for_each_bio(_bio) \ - bip_for_each_vec(_bvl, _bio->bi_integrity, _iter) - -int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len, u32 seed); -void bio_integrity_unmap_free_user(struct bio *bio); -extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int); -extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int); -extern bool bio_integrity_prep(struct bio *); -extern void bio_integrity_advance(struct bio *, unsigned int); -extern void bio_integrity_trim(struct bio *); -extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t); -extern int bioset_integrity_create(struct bio_set *, int); -extern void bioset_integrity_free(struct bio_set *); -extern void bio_integrity_init(void); - -#else /* CONFIG_BLK_DEV_INTEGRITY */ - -static inline void *bio_integrity(struct bio *bio) -{ - return NULL; -} - -static inline int bioset_integrity_create(struct bio_set *bs, int pool_size) -{ - return 0; -} - -static inline void bioset_integrity_free (struct bio_set *bs) -{ - return; -} - -static inline bool bio_integrity_prep(struct bio *bio) -{ - return true; -} - -static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src, - gfp_t gfp_mask) -{ - return 0; -} - -static inline void bio_integrity_advance(struct bio *bio, - unsigned int bytes_done) -{ - return; -} - -static inline void bio_integrity_trim(struct bio *bio) -{ - return; -} - -static inline void bio_integrity_init(void) -{ - return; -} - -static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag) -{ - return false; -} - -static inline void *bio_integrity_alloc(struct bio * bio, gfp_t gfp, - unsigned int nr) -{ - return ERR_PTR(-EINVAL); -} - -static inline int bio_integrity_add_page(struct bio *bio, struct page *page, - unsigned int len, unsigned int offset) -{ - return 0; -} - -static inline int bio_integrity_map_user(struct bio *bio, void __user *ubuf, - ssize_t len, u32 seed) -{ - return -EINVAL; -} -static inline void bio_integrity_unmap_free_user(struct bio *bio) -{ -} - -#endif /* CONFIG_BLK_DEV_INTEGRITY */ - /* * Mark a bio as polled. Note that for async polled IO, the caller must * expect -EWOULDBLOCK if we cannot allocate a request (or other resources). 
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h index 804f856ed3e5..de98049b7ded 100644 --- a/include/linux/blk-integrity.h +++ b/include/linux/blk-integrity.h @@ -3,6 +3,7 @@ #define _LINUX_BLK_INTEGRITY_H #include <linux/blk-mq.h> +#include <linux/bio-integrity.h> struct request; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 89ba6b16fe8b..8d304b1d16b1 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -27,38 +27,61 @@ typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t); * request flags */ typedef __u32 __bitwise req_flags_t; -/* drive already may have started this one */ -#define RQF_STARTED ((__force req_flags_t)(1 << 1)) -/* request for flush sequence */ -#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4)) -/* merge of different types, fail separately */ -#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5)) -/* don't call prep for this one */ -#define RQF_DONTPREP ((__force req_flags_t)(1 << 7)) -/* use hctx->sched_tags */ -#define RQF_SCHED_TAGS ((__force req_flags_t)(1 << 8)) -/* use an I/O scheduler for this request */ -#define RQF_USE_SCHED ((__force req_flags_t)(1 << 9)) -/* vaguely specified driver internal error. Ignored by the block layer */ -#define RQF_FAILED ((__force req_flags_t)(1 << 10)) -/* don't warn about errors */ -#define RQF_QUIET ((__force req_flags_t)(1 << 11)) -/* account into disk and partition IO statistics */ -#define RQF_IO_STAT ((__force req_flags_t)(1 << 13)) -/* runtime pm request */ -#define RQF_PM ((__force req_flags_t)(1 << 15)) -/* on IO scheduler merge hash */ -#define RQF_HASHED ((__force req_flags_t)(1 << 16)) -/* track IO completion time */ -#define RQF_STATS ((__force req_flags_t)(1 << 17)) -/* Look at ->special_vec for the actual data payload instead of the - bio chain. */ -#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) -/* The request completion needs to be signaled to zone write pluging. */ -#define RQF_ZONE_WRITE_PLUGGING ((__force req_flags_t)(1 << 20)) -/* ->timeout has been called, don't expire again */ -#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21)) -#define RQF_RESV ((__force req_flags_t)(1 << 23)) +/* Keep rqf_name[] in sync with the definitions below */ +enum { + /* drive already may have started this one */ + __RQF_STARTED, + /* request for flush sequence */ + __RQF_FLUSH_SEQ, + /* merge of different types, fail separately */ + __RQF_MIXED_MERGE, + /* don't call prep for this one */ + __RQF_DONTPREP, + /* use hctx->sched_tags */ + __RQF_SCHED_TAGS, + /* use an I/O scheduler for this request */ + __RQF_USE_SCHED, + /* vaguely specified driver internal error. Ignored by block layer */ + __RQF_FAILED, + /* don't warn about errors */ + __RQF_QUIET, + /* account into disk and partition IO statistics */ + __RQF_IO_STAT, + /* runtime pm request */ + __RQF_PM, + /* on IO scheduler merge hash */ + __RQF_HASHED, + /* track IO completion time */ + __RQF_STATS, + /* Look at ->special_vec for the actual data payload instead of the + bio chain. */ + __RQF_SPECIAL_PAYLOAD, + /* request completion needs to be signaled to zone write plugging. 
*/ + __RQF_ZONE_WRITE_PLUGGING, + /* ->timeout has been called, don't expire again */ + __RQF_TIMED_OUT, + __RQF_RESV, + __RQF_BITS +}; + +#define RQF_STARTED ((__force req_flags_t)(1 << __RQF_STARTED)) +#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << __RQF_FLUSH_SEQ)) +#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << __RQF_MIXED_MERGE)) +#define RQF_DONTPREP ((__force req_flags_t)(1 << __RQF_DONTPREP)) +#define RQF_SCHED_TAGS ((__force req_flags_t)(1 << __RQF_SCHED_TAGS)) +#define RQF_USE_SCHED ((__force req_flags_t)(1 << __RQF_USE_SCHED)) +#define RQF_FAILED ((__force req_flags_t)(1 << __RQF_FAILED)) +#define RQF_QUIET ((__force req_flags_t)(1 << __RQF_QUIET)) +#define RQF_IO_STAT ((__force req_flags_t)(1 << __RQF_IO_STAT)) +#define RQF_PM ((__force req_flags_t)(1 << __RQF_PM)) +#define RQF_HASHED ((__force req_flags_t)(1 << __RQF_HASHED)) +#define RQF_STATS ((__force req_flags_t)(1 << __RQF_STATS)) +#define RQF_SPECIAL_PAYLOAD \ + ((__force req_flags_t)(1 << __RQF_SPECIAL_PAYLOAD)) +#define RQF_ZONE_WRITE_PLUGGING \ + ((__force req_flags_t)(1 << __RQF_ZONE_WRITE_PLUGGING)) +#define RQF_TIMED_OUT ((__force req_flags_t)(1 << __RQF_TIMED_OUT)) +#define RQF_RESV ((__force req_flags_t)(1 << __RQF_RESV)) /* flags that prevent us from merging requests: */ #define RQF_NOMERGE_FLAGS \ @@ -278,8 +301,12 @@ enum blk_eh_timer_return { BLK_EH_RESET_TIMER, }; -#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */ -#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */ +/* Keep alloc_policy_name[] in sync with the definitions below */ +enum { + BLK_TAG_ALLOC_FIFO, /* allocate starting from 0 */ + BLK_TAG_ALLOC_RR, /* allocate starting from last allocated tag */ + BLK_TAG_ALLOC_MAX +}; /** * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware @@ -644,6 +671,7 @@ struct blk_mq_ops { #endif }; +/* Keep hctx_flag_name[] in sync with the definitions below */ enum { BLK_MQ_F_SHOULD_MERGE = 1 << 0, BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1, @@ -653,27 +681,17 @@ enum { */ BLK_MQ_F_STACKING = 1 << 2, BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3, - BLK_MQ_F_BLOCKING = 1 << 5, + BLK_MQ_F_BLOCKING = 1 << 4, /* Do not allow an I/O scheduler to be configured. */ - BLK_MQ_F_NO_SCHED = 1 << 6, + BLK_MQ_F_NO_SCHED = 1 << 5, + /* * Select 'none' during queue registration in case of a single hwq * or shared hwqs instead of 'mq-deadline'. 
*/ - BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 7, - BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, + BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 6, + BLK_MQ_F_ALLOC_POLICY_START_BIT = 7, BLK_MQ_F_ALLOC_POLICY_BITS = 1, - - BLK_MQ_S_STOPPED = 0, - BLK_MQ_S_TAG_ACTIVE = 1, - BLK_MQ_S_SCHED_RESTART = 2, - - /* hw queue is inactive after all its CPUs become offline */ - BLK_MQ_S_INACTIVE = 3, - - BLK_MQ_MAX_DEPTH = 10240, - - BLK_MQ_CPU_WORK_BATCH = 8, }; #define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \ ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \ @@ -682,8 +700,19 @@ enum { ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \ << BLK_MQ_F_ALLOC_POLICY_START_BIT) +#define BLK_MQ_MAX_DEPTH (10240) #define BLK_MQ_NO_HCTX_IDX (-1U) +enum { + /* Keep hctx_state_name[] in sync with the definitions below */ + BLK_MQ_S_STOPPED, + BLK_MQ_S_TAG_ACTIVE, + BLK_MQ_S_SCHED_RESTART, + /* hw queue is inactive after all its CPUs become offline */ + BLK_MQ_S_INACTIVE, + BLK_MQ_S_MAX +}; + struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, struct queue_limits *lim, void *queuedata, struct lock_class_key *lkclass); diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 632edd71f8c6..36ed96133217 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -354,6 +354,7 @@ enum req_op { REQ_OP_LAST = (__force blk_opf_t)36, }; +/* Keep cmd_flag_name[] in sync with the definitions below */ enum req_flag_bits { __REQ_FAILFAST_DEV = /* no driver retries of device errors */ REQ_OP_BITS, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index b8196e219ac2..e85ec73a07d5 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -588,27 +588,28 @@ struct request_queue { }; /* Keep blk_queue_flag_name[] in sync with the definitions below */ -#define QUEUE_FLAG_STOPPED 0 /* queue is stopped */ -#define QUEUE_FLAG_DYING 1 /* queue being torn down */ -#define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */ -#define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */ -#define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */ -#define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */ -#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */ -#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */ -#define QUEUE_FLAG_STATS 20 /* track IO start and completion times */ -#define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */ -#define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */ -#define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */ -#define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */ -#define QUEUE_FLAG_SQ_SCHED 30 /* single queue style io dispatch */ +enum { + QUEUE_FLAG_DYING, /* queue being torn down */ + QUEUE_FLAG_NOMERGES, /* disable merge attempts */ + QUEUE_FLAG_SAME_COMP, /* complete on same CPU-group */ + QUEUE_FLAG_FAIL_IO, /* fake timeout */ + QUEUE_FLAG_NOXMERGES, /* No extended merges */ + QUEUE_FLAG_SAME_FORCE, /* force complete on same CPU */ + QUEUE_FLAG_INIT_DONE, /* queue is initialized */ + QUEUE_FLAG_STATS, /* track IO start and completion times */ + QUEUE_FLAG_REGISTERED, /* queue has been registered to a disk */ + QUEUE_FLAG_QUIESCED, /* queue has been quiesced */ + QUEUE_FLAG_RQ_ALLOC_TIME, /* record rq->alloc_time_ns */ + QUEUE_FLAG_HCTX_ACTIVE, /* at least one blk-mq hctx is active */ + QUEUE_FLAG_SQ_SCHED, /* single queue style io dispatch */ + QUEUE_FLAG_MAX +}; #define QUEUE_FLAG_MQ_DEFAULT (1UL << QUEUE_FLAG_SAME_COMP) void blk_queue_flag_set(unsigned int flag, struct request_queue *q); void 
blk_queue_flag_clear(unsigned int flag, struct request_queue *q); -#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags) #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags) #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 293af7f8a694..ae04035b6cbe 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -539,9 +539,6 @@ struct cgroup { /* used to store eBPF programs */ struct cgroup_bpf bpf; - /* If there is block congestion on this cgroup. */ - atomic_t congestion_count; - /* Used to store internal freezer state */ struct cgroup_freezer_state freezer; diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 41d1d71c36ff..01bee2b289c2 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -259,15 +259,14 @@ struct node_footer { #define CUR_ADDRS_PER_INODE(inode) (DEF_ADDRS_PER_INODE - \ get_extra_isize(inode)) #define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */ -#define ADDRS_PER_INODE(inode) addrs_per_inode(inode) +#define ADDRS_PER_INODE(inode) addrs_per_page(inode, true) /* Address Pointers in a Direct Block */ #define DEF_ADDRS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32)) -#define ADDRS_PER_BLOCK(inode) addrs_per_block(inode) +#define ADDRS_PER_BLOCK(inode) addrs_per_page(inode, false) /* Node IDs in an Indirect Block */ #define NIDS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32)) -#define ADDRS_PER_PAGE(page, inode) \ - (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK(inode)) +#define ADDRS_PER_PAGE(page, inode) (addrs_per_page(inode, IS_INODE(page))) #define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1) #define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2) diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h index bfe7c1f1ac6d..f0231dbc4777 100644 --- a/include/linux/hwspinlock.h +++ b/include/linux/hwspinlock.h @@ -68,6 +68,7 @@ int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int, int __hwspin_trylock(struct hwspinlock *, int, unsigned long *); void __hwspin_unlock(struct hwspinlock *, int, unsigned long *); int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name); +int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id); int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock); struct hwspinlock *devm_hwspin_lock_request(struct device *dev); struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev, @@ -127,6 +128,11 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags) { } +static inline int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id) +{ + return 0; +} + static inline int of_hwspin_lock_get_id(struct device_node *np, int index) { return 0; diff --git a/include/linux/i2c.h b/include/linux/i2c.h index e9cc14b1f9a1..8caaa13834bf 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -30,7 +30,6 @@ extern const struct device_type i2c_client_type; /* --- General options ------------------------------------------------ */ struct i2c_msg; -struct i2c_algorithm; struct i2c_adapter; struct i2c_client; struct i2c_driver; @@ -512,16 +511,15 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info, #endif /* I2C_BOARDINFO */ /** - * struct i2c_algorithm - represent I2C transfer method - * @xfer: Issue a set 
of i2c transactions to the given I2C adapter - * defined by the msgs array, with num messages available to transfer via - * the adapter specified by adap. - * @xfer_atomic: same as @xfer. Yet, only using atomic context - * so e.g. PMICs can be accessed very late before shutdown. Optional. - * @smbus_xfer: Issue smbus transactions to the given I2C adapter. If this + * struct i2c_algorithm - represent I2C transfer methods + * @xfer: Transfer a given number of messages defined by the msgs array via + * the specified adapter. + * @xfer_atomic: Same as @xfer. Yet, only using atomic context so e.g. PMICs + * can be accessed very late before shutdown. Optional. + * @smbus_xfer: Issue SMBus transactions to the given I2C adapter. If this * is not present, then the bus layer will try and convert the SMBus calls * into I2C transfers instead. - * @smbus_xfer_atomic: same as @smbus_xfer. Yet, only using atomic context + * @smbus_xfer_atomic: Same as @smbus_xfer. Yet, only using atomic context * so e.g. PMICs can be accessed very late before shutdown. Optional. * @functionality: Return the flags that this algorithm/adapter pair supports * from the ``I2C_FUNC_*`` flags. @@ -533,8 +531,6 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info, * @reg_slave: deprecated, use @reg_target * @unreg_slave: deprecated, use @unreg_target * - * - * The following structs are for those who like to implement new bus drivers: * i2c_algorithm is the interface to a class of hardware solutions which can * be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584 * to name two of the most common. @@ -550,9 +546,6 @@ struct i2c_algorithm { * to NULL. If an adapter algorithm can do SMBus access, set * smbus_xfer. If set to NULL, the SMBus protocol is simulated * using common I2C messages. - * - * xfer should return the number of messages successfully - * processed, or a negative value on error */ union { int (*xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs, diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index bea39a0292eb..3f30c88e0b4c 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -169,7 +169,7 @@ static inline int __must_check request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, const char *name, void *dev) { - return request_threaded_irq(irq, handler, NULL, flags, name, dev); + return request_threaded_irq(irq, handler, NULL, flags | IRQF_COND_ONESHOT, name, dev); } extern int __must_check diff --git a/include/linux/irq.h b/include/linux/irq.h index a217e1029c1d..1f5dbf1f92c9 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -1106,6 +1106,7 @@ enum irq_gc_flags { * @irq_flags_to_set: IRQ* flags to set on irq setup * @irq_flags_to_clear: IRQ* flags to clear on irq setup * @gc_flags: Generic chip specific setup flags + * @exit: Function called on each chip when they are destroyed. 
* @gc: Array of pointers to generic interrupt chips */ struct irq_domain_chip_generic { @@ -1114,9 +1115,37 @@ struct irq_domain_chip_generic { unsigned int irq_flags_to_clear; unsigned int irq_flags_to_set; enum irq_gc_flags gc_flags; + void (*exit)(struct irq_chip_generic *gc); struct irq_chip_generic *gc[]; }; +/** + * struct irq_domain_chip_generic_info - Generic chip information structure + * @name: Name of the generic interrupt chip + * @handler: Interrupt handler used by the generic interrupt chip + * @irqs_per_chip: Number of interrupts each chip handles (max 32) + * @num_ct: Number of irq_chip_type instances associated with each + * chip + * @irq_flags_to_clear: IRQ_* bits to clear in the mapping function + * @irq_flags_to_set: IRQ_* bits to set in the mapping function + * @gc_flags: Generic chip specific setup flags + * @init: Function called on each chip when they are created. + * Allow to do some additional chip initialisation. + * @exit: Function called on each chip when they are destroyed. + * Allow to do some chip cleanup operation. + */ +struct irq_domain_chip_generic_info { + const char *name; + irq_flow_handler_t handler; + unsigned int irqs_per_chip; + unsigned int num_ct; + unsigned int irq_flags_to_clear; + unsigned int irq_flags_to_set; + enum irq_gc_flags gc_flags; + int (*init)(struct irq_chip_generic *gc); + void (*exit)(struct irq_chip_generic *gc); +}; + /* Generic chip callback functions */ void irq_gc_noop(struct irq_data *d); void irq_gc_mask_disable_reg(struct irq_data *d); @@ -1153,6 +1182,20 @@ int devm_irq_setup_generic_chip(struct device *dev, struct irq_chip_generic *gc, struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq); +#ifdef CONFIG_GENERIC_IRQ_CHIP +int irq_domain_alloc_generic_chips(struct irq_domain *d, + const struct irq_domain_chip_generic_info *info); +void irq_domain_remove_generic_chips(struct irq_domain *d); +#else +static inline int +irq_domain_alloc_generic_chips(struct irq_domain *d, + const struct irq_domain_chip_generic_info *info) +{ + return -EINVAL; +} +static inline void irq_domain_remove_generic_chips(struct irq_domain *d) { } +#endif /* CONFIG_GENERIC_IRQ_CHIP */ + int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip, int num_ct, const char *name, irq_flow_handler_t handler, diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index 2c63375bbd43..ecabed6d3307 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -25,6 +25,14 @@ struct its_vm { irq_hw_number_t db_lpi_base; unsigned long *db_bitmap; int nr_db_lpis; + /* + * Ensures mutual exclusion between updates to vlpi_count[] + * and map/unmap when using the ITSList mechanism. + * + * The lock order for any sequence involving the ITSList is + * vmapp_lock -> vpe_lock ->vmovp_lock. + */ + raw_spinlock_t vmapp_lock; u32 vlpi_count[GICv4_ITS_LIST_MAX]; }; diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 21ecf582a0fe..de6105f68fec 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -74,11 +74,24 @@ void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args, * struct irq_domain_ops - Methods for irq_domain objects * @match: Match an interrupt controller device node to a host, returns * 1 on a match + * @select: Match an interrupt controller fw specification. It is more generic + * than @match as it receives a complete struct irq_fwspec. Therefore, + * @select is preferred if provided. 
Returns 1 on a match. * @map: Create or update a mapping between a virtual irq number and a hw * irq number. This is called only once for a given mapping. * @unmap: Dispose of such a mapping * @xlate: Given a device tree node and interrupt specifier, decode * the hardware irq number and linux irq type value. + * @alloc: Allocate @nr_irqs interrupts starting from @virq. + * @free: Free @nr_irqs interrupts starting from @virq. + * @activate: Activate one interrupt in HW (@irqd). If @reserve is set, only + * reserve the vector. If unset, assign the vector (called from + * request_irq()). + * @deactivate: Disarm one interrupt (@irqd). + * @translate: Given @fwspec, decode the hardware irq number (@out_hwirq) and + * linux irq type value (@out_type). This is a generalised @xlate + * (over struct irq_fwspec) and is preferred if provided. + * @debug_show: For domains to show specific data for an interrupt in debugfs. * * Functions below are provided by the driver and called whenever a new mapping * is created or an old mapping is disposed. The driver can then proceed to @@ -131,6 +144,9 @@ struct irq_domain_chip_generic; * Optional elements: * @fwnode: Pointer to firmware node associated with the irq_domain. Pretty easy * to swap it for the of_node via the irq_domain_get_of_node accessor + * @bus_token: @fwnode's device_node might be used for several irq domains. But + * in connection with @bus_token, the pair shall be unique in a + * system. * @gc: Pointer to a list of generic chips. There is a helper function for * setting up one or more generic chips for interrupt controllers * drivers using the generic chip library which uses this pointer. @@ -141,9 +157,12 @@ struct irq_domain_chip_generic; * purposes related to the irq domain. * @parent: Pointer to parent irq_domain to support hierarchy irq_domains * @msi_parent_ops: Pointer to MSI parent domain methods for per device domain init + * @exit: Function called when the domain is destroyed * * Revmap data, used internally by the irq domain code: - * @revmap_size: Size of the linear map table @revmap[] + * @hwirq_max: Top limit for the HW irq number. Especially to avoid + * conflicts/failures with reserved HW irqs. Can be ~0. + * @revmap_size: Size of the linear map table @revmap * @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map * @revmap: Linear table of irq_data pointers */ @@ -169,6 +188,7 @@ struct irq_domain { #ifdef CONFIG_GENERIC_MSI_IRQ const struct msi_parent_ops *msi_parent_ops; #endif + void (*exit)(struct irq_domain *d); /* reverse map data. 
The linear map gets appended to the irq_domain */ irq_hw_number_t hwirq_max; @@ -182,7 +202,7 @@ enum { /* Irq domain is hierarchical */ IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0), - /* Irq domain name was allocated in __irq_domain_add() */ + /* Irq domain name was allocated internally */ IRQ_DOMAIN_NAME_ALLOCATED = (1 << 1), /* Irq domain is an IPI domain with virq per cpu */ @@ -208,6 +228,9 @@ enum { /* Irq domain is a MSI device domain */ IRQ_DOMAIN_FLAG_MSI_DEVICE = (1 << 9), + /* Irq domain must destroy generic chips when removed */ + IRQ_DOMAIN_FLAG_DESTROY_GC = (1 << 10), + /* * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved * for implementation specific purposes and ignored by the @@ -257,10 +280,51 @@ static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa) } void irq_domain_free_fwnode(struct fwnode_handle *fwnode); -struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size, - irq_hw_number_t hwirq_max, int direct_max, - const struct irq_domain_ops *ops, - void *host_data); + +struct irq_domain_chip_generic_info; + +/** + * struct irq_domain_info - Domain information structure + * @fwnode: firmware node for the interrupt controller + * @domain_flags: Additional flags to add to the domain flags + * @size: Size of linear map; 0 for radix mapping only + * @hwirq_max: Maximum number of interrupts supported by controller + * @direct_max: Maximum value of direct maps; + * Use ~0 for no limit; 0 for no direct mapping + * @bus_token: Domain bus token + * @ops: Domain operation callbacks + * @host_data: Controller private data pointer + * @dgc_info: Geneneric chip information structure pointer used to + * create generic chips for the domain if not NULL. + * @init: Function called when the domain is created. + * Allow to do some additional domain initialisation. + * @exit: Function called when the domain is destroyed. + * Allow to do some additional cleanup operation. 
+ */ +struct irq_domain_info { + struct fwnode_handle *fwnode; + unsigned int domain_flags; + unsigned int size; + irq_hw_number_t hwirq_max; + int direct_max; + enum irq_domain_bus_token bus_token; + const struct irq_domain_ops *ops; + void *host_data; +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY + /** + * @parent: Pointer to the parent irq domain used in a hierarchy domain + */ + struct irq_domain *parent; +#endif + struct irq_domain_chip_generic_info *dgc_info; + int (*init)(struct irq_domain *d); + void (*exit)(struct irq_domain *d); +}; + +struct irq_domain *irq_domain_instantiate(const struct irq_domain_info *info); +struct irq_domain *devm_irq_domain_instantiate(struct device *dev, + const struct irq_domain_info *info); + struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode, unsigned int size, unsigned int first_irq, @@ -293,7 +357,7 @@ static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) extern const struct fwnode_operations irqchip_fwnode_ops; -static inline bool is_fwnode_irqchip(struct fwnode_handle *fwnode) +static inline bool is_fwnode_irqchip(const struct fwnode_handle *fwnode) { return fwnode && fwnode->ops == &irqchip_fwnode_ops; } @@ -350,7 +414,17 @@ static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_no const struct irq_domain_ops *ops, void *host_data) { - return __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data); + struct irq_domain_info info = { + .fwnode = of_node_to_fwnode(of_node), + .size = size, + .hwirq_max = size, + .ops = ops, + .host_data = host_data, + }; + struct irq_domain *d; + + d = irq_domain_instantiate(&info); + return IS_ERR(d) ? NULL : d; } #ifdef CONFIG_IRQ_DOMAIN_NOMAP @@ -359,7 +433,17 @@ static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_nod const struct irq_domain_ops *ops, void *host_data) { - return __irq_domain_add(of_node_to_fwnode(of_node), 0, max_irq, max_irq, ops, host_data); + struct irq_domain_info info = { + .fwnode = of_node_to_fwnode(of_node), + .hwirq_max = max_irq, + .direct_max = max_irq, + .ops = ops, + .host_data = host_data, + }; + struct irq_domain *d; + + d = irq_domain_instantiate(&info); + return IS_ERR(d) ? NULL : d; } extern unsigned int irq_create_direct_mapping(struct irq_domain *host); @@ -369,7 +453,16 @@ static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node const struct irq_domain_ops *ops, void *host_data) { - return __irq_domain_add(of_node_to_fwnode(of_node), 0, ~0, 0, ops, host_data); + struct irq_domain_info info = { + .fwnode = of_node_to_fwnode(of_node), + .hwirq_max = ~0U, + .ops = ops, + .host_data = host_data, + }; + struct irq_domain *d; + + d = irq_domain_instantiate(&info); + return IS_ERR(d) ? NULL : d; } static inline struct irq_domain *irq_domain_create_linear(struct fwnode_handle *fwnode, @@ -377,14 +470,33 @@ static inline struct irq_domain *irq_domain_create_linear(struct fwnode_handle * const struct irq_domain_ops *ops, void *host_data) { - return __irq_domain_add(fwnode, size, size, 0, ops, host_data); + struct irq_domain_info info = { + .fwnode = fwnode, + .size = size, + .hwirq_max = size, + .ops = ops, + .host_data = host_data, + }; + struct irq_domain *d; + + d = irq_domain_instantiate(&info); + return IS_ERR(d) ? 
NULL : d; } static inline struct irq_domain *irq_domain_create_tree(struct fwnode_handle *fwnode, const struct irq_domain_ops *ops, void *host_data) { - return __irq_domain_add(fwnode, 0, ~0, 0, ops, host_data); + struct irq_domain_info info = { + .fwnode = fwnode, + .hwirq_max = ~0, + .ops = ops, + .host_data = host_data, + }; + struct irq_domain *d; + + d = irq_domain_instantiate(&info); + return IS_ERR(d) ? NULL : d; } extern void irq_domain_remove(struct irq_domain *host); diff --git a/include/linux/module.h b/include/linux/module.h index 4213d8993cd8..88ecc5e9f523 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -174,6 +174,12 @@ extern void cleanup_module(void); #define MODULE_SOFTDEP(_softdep) MODULE_INFO(softdep, _softdep) /* + * Weak module dependencies. See man modprobe.d for details. + * Example: MODULE_WEAKDEP("module-foo") + */ +#define MODULE_WEAKDEP(_weakdep) MODULE_INFO(weakdep, _weakdep) + +/* * MODULE_FILE is used for generating modules.builtin * So, make it no-op when this is being built as a module */ diff --git a/include/linux/msi.h b/include/linux/msi.h index 26588da88bdd..944979763825 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -21,11 +21,7 @@ #include <linux/irqdomain_defs.h> #include <linux/cpumask_types.h> #include <linux/msi_api.h> -#include <linux/xarray.h> -#include <linux/mutex.h> -#include <linux/list.h> #include <linux/irq.h> -#include <linux/bits.h> #include <asm/msi.h> @@ -81,7 +77,6 @@ extern int pci_msi_ignore_mask; /* Helper functions */ struct msi_desc; struct pci_dev; -struct platform_msi_priv_data; struct device_attribute; struct irq_domain; struct irq_affinity_desc; @@ -228,22 +223,6 @@ struct msi_dev_domain { struct irq_domain *domain; }; -/** - * msi_device_data - MSI per device data - * @properties: MSI properties which are interesting to drivers - * @platform_data: Platform-MSI specific data - * @mutex: Mutex protecting the MSI descriptor store - * @__domains: Internal data for per device MSI domains - * @__iter_idx: Index to search the next entry for iterators - */ -struct msi_device_data { - unsigned long properties; - struct platform_msi_priv_data *platform_data; - struct mutex mutex; - struct msi_dev_domain __domains[MSI_MAX_DEVICE_IRQDOMAINS]; - unsigned long __iter_idx; -}; - int msi_setup_device_data(struct device *dev); void msi_lock_descs(struct device *dev); @@ -556,6 +535,8 @@ enum { MSI_FLAG_USE_DEV_FWNODE = (1 << 7), /* Set parent->dev into domain->pm_dev on device domain creation */ MSI_FLAG_PARENT_PM_DEV = (1 << 8), + /* Support for parent mask/unmask */ + MSI_FLAG_PCI_MSI_MASK_PARENT = (1 << 9), /* Mask for the generic functionality */ MSI_GENERIC_FLAGS_MASK = GENMASK(15, 0), @@ -639,35 +620,6 @@ void msi_domain_free_irqs_all(struct device *dev, unsigned int domid); struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); -struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode, - struct msi_domain_info *info, - struct irq_domain *parent); - -/* When an MSI domain is used as an intermediate domain */ -int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev, - int nvec, msi_alloc_info_t *args); -int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev, - int virq, int nvec, msi_alloc_info_t *args); -void msi_domain_depopulate_descs(struct device *dev, int virq, int nvec); - -struct irq_domain * -__platform_msi_create_device_domain(struct device *dev, - unsigned int nvec, - bool is_tree, - irq_write_msi_msg_t 
write_msi_msg, - const struct irq_domain_ops *ops, - void *host_data); - -#define platform_msi_create_device_domain(dev, nvec, write, ops, data) \ - __platform_msi_create_device_domain(dev, nvec, false, write, ops, data) -#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \ - __platform_msi_create_device_domain(dev, nvec, true, write, ops, data) - -int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int virq, - unsigned int nr_irqs); -void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int virq, - unsigned int nvec); -void *platform_msi_get_host_data(struct irq_domain *domain); /* Per device platform MSI */ int platform_device_msi_init_and_alloc_irqs(struct device *dev, unsigned int nvec, irq_write_msi_msg_t write_msi_msg); diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 483a191bb4df..d9c7edb6422b 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -1537,10 +1537,4 @@ unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio) { return folio_size(folio) >> inode->i_blkbits; } - -static inline -unsigned int i_blocks_per_page(struct inode *inode, struct page *page) -{ - return i_blocks_per_folio(inode, page_folio(page)); -} #endif /* _LINUX_PAGEMAP_H */ diff --git a/include/linux/platform_data/i2c-mux-gpio.h b/include/linux/platform_data/i2c-mux-gpio.h index 816a4cd3ccb5..96843aab4d1e 100644 --- a/include/linux/platform_data/i2c-mux-gpio.h +++ b/include/linux/platform_data/i2c-mux-gpio.h @@ -19,6 +19,7 @@ * position * @n_values: Number of multiplexer positions (busses to instantiate) * @idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used + * @settle_time: Delay to wait when a new bus is selected */ struct i2c_mux_gpio_platform_data { int parent; @@ -26,6 +27,7 @@ struct i2c_mux_gpio_platform_data { const unsigned *values; int n_values; unsigned idle; + u32 settle_time; }; #endif /* _LINUX_I2C_MUX_GPIO_H */ diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index c852cc882501..72dc7e45c90c 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -309,18 +309,11 @@ struct power_supply { #endif #ifdef CONFIG_LEDS_TRIGGERS - struct led_trigger *charging_full_trig; - char *charging_full_trig_name; + struct led_trigger *trig; struct led_trigger *charging_trig; - char *charging_trig_name; struct led_trigger *full_trig; - char *full_trig_name; - struct led_trigger *online_trig; - char *online_trig_name; struct led_trigger *charging_blink_full_solid_trig; - char *charging_blink_full_solid_trig_name; struct led_trigger *charging_orange_full_green_trig; - char *charging_orange_full_green_trig_name; #endif }; @@ -743,7 +736,7 @@ struct power_supply_battery_info { int overvoltage_limit_uv; int constant_charge_current_max_ua; int constant_charge_voltage_max_uv; - struct power_supply_maintenance_charge_table *maintenance_charge; + const struct power_supply_maintenance_charge_table *maintenance_charge; int maintenance_charge_size; int alert_low_temp_charge_current_ua; int alert_low_temp_charge_voltage_uv; @@ -762,9 +755,9 @@ struct power_supply_battery_info { int ocv_table_size[POWER_SUPPLY_OCV_TEMP_MAX]; struct power_supply_resistance_temp_table *resist_table; int resist_table_size; - struct power_supply_vbat_ri_table *vbat2ri_discharging; + const struct power_supply_vbat_ri_table *vbat2ri_discharging; int vbat2ri_discharging_size; - struct power_supply_vbat_ri_table *vbat2ri_charging; + const struct 
power_supply_vbat_ri_table *vbat2ri_charging; int vbat2ri_charging_size; int bti_resistance_ohm; int bti_resistance_tolerance; @@ -817,7 +810,7 @@ power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table int table_len, int temp); extern int power_supply_vbat2ri(struct power_supply_battery_info *info, int vbat_uv, bool charging); -extern struct power_supply_maintenance_charge_table * +extern const struct power_supply_maintenance_charge_table * power_supply_get_maintenance_charging_setting(struct power_supply_battery_info *info, int index); extern bool power_supply_battery_bti_in_range(struct power_supply_battery_info *info, int resistance); @@ -831,7 +824,7 @@ extern int power_supply_set_battery_charged(struct power_supply *psy); static inline bool power_supply_supports_maintenance_charging(struct power_supply_battery_info *info) { - struct power_supply_maintenance_charge_table *mt; + const struct power_supply_maintenance_charge_table *mt; mt = power_supply_get_maintenance_charging_setting(info, 0); diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index d662cf136021..c09cdcc99471 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -36,6 +36,11 @@ struct sbitmap_word { * @cleared: word holding cleared bits */ unsigned long cleared ____cacheline_aligned_in_smp; + + /** + * @swap_lock: serializes simultaneous updates of ->word and ->cleared + */ + spinlock_t swap_lock; } ____cacheline_aligned_in_smp; /** diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h index 0943bf419e11..f946e3beca21 100644 --- a/include/linux/soc/qcom/smem.h +++ b/include/linux/soc/qcom/smem.h @@ -15,4 +15,6 @@ phys_addr_t qcom_smem_virt_to_phys(void *p); int qcom_smem_get_soc_id(u32 *id); int qcom_smem_get_feature_code(u32 *code); +int qcom_smem_bust_hwspin_lock_by_host(unsigned int host); + #endif |