|
| 1 | +rxrpc: Move the client conn cache management to the I/O thread |
| 2 | + |
| 3 | +jira LE-1907 |
| 4 | +Rebuild_History Non-Buildable kernel-5.14.0-284.30.1.el9_2 |
| 5 | +commit-author David Howells <dhowells@redhat.com> |
| 6 | +commit 0d6bf319bc5aba4535bb46e1b607973688a2248a |
| 7 | +Empty-Commit: Cherry-Pick Conflicts during history rebuild. |
| 8 | +Will be included in final tarball splat. Ref for failed cherry-pick at: |
| 9 | +ciq/ciq_backports/kernel-5.14.0-284.30.1.el9_2/0d6bf319.failed |
| 10 | + |
| 11 | +Move the management of the client connection cache to the I/O thread rather |
| 12 | +than managing it from the namespace as an aggregate across all the local |
| 13 | +endpoints within the namespace. |
| 14 | + |
| 15 | +This will allow a load of locking to be got rid of in a future patch as |
| 16 | +only the I/O thread will be looking at this. |
| 17 | + |
| 18 | +The downside is that the total number of cached connections on the system |
| 19 | +can get higher because the limit is now per-local rather than per-netns. |
| 20 | +We can, however, keep the number of client conns in use across the entire |
| 21 | +netns and use that to reduce the expiration time of idle connections. |
| 22 | + |
| 23 | + Signed-off-by: David Howells <dhowells@redhat.com> |
| 24 | +cc: Marc Dionne <marc.dionne@auristor.com> |
| 25 | +cc: linux-afs@lists.infradead.org |
| 26 | +(cherry picked from commit 0d6bf319bc5aba4535bb46e1b607973688a2248a) |
| 27 | + Signed-off-by: Jonathan Maple <jmaple@ciq.com> |
| 28 | + |
| 29 | +# Conflicts: |
| 30 | +# net/rxrpc/ar-internal.h |
| 31 | +# net/rxrpc/conn_client.c |
| 32 | +# net/rxrpc/io_thread.c |
| 33 | +# net/rxrpc/local_object.c |
| 34 | +# net/rxrpc/net_ns.c |
| 35 | +diff --cc net/rxrpc/ar-internal.h |
| 36 | +index 46ce41afb431,de84061a5447..000000000000 |
| 37 | +--- a/net/rxrpc/ar-internal.h |
| 38 | ++++ b/net/rxrpc/ar-internal.h |
| 39 | +@@@ -73,13 -76,7 +73,15 @@@ struct rxrpc_net |
| 40 | + |
| 41 | + bool live; |
| 42 | + |
| 43 | +- bool kill_all_client_conns; |
| 44 | + atomic_t nr_client_conns; |
| 45 | +++<<<<<<< HEAD |
| 46 | + + spinlock_t client_conn_cache_lock; /* Lock for ->*_client_conns */ |
| 47 | + + spinlock_t client_conn_discard_lock; /* Prevent multiple discarders */ |
| 48 | + + struct list_head idle_client_conns; |
| 49 | + + struct work_struct client_conn_reaper; |
| 50 | + + struct timer_list client_conn_reap_timer; |
| 51 | +++======= |
| 52 | +++>>>>>>> 0d6bf319bc5a (rxrpc: Move the client conn cache management to the I/O thread) |
| 53 | + |
| 54 | + struct hlist_head local_endpoints; |
| 55 | + struct mutex local_mutex; /* Lock for ->local_endpoints */ |
| 56 | +@@@ -276,18 -277,27 +278,32 @@@ struct rxrpc_local |
| 57 | + struct rcu_head rcu; |
| 58 | + atomic_t active_users; /* Number of users of the local endpoint */ |
| 59 | + refcount_t ref; /* Number of references to the structure */ |
| 60 | + - struct net *net; /* The network namespace */ |
| 61 | + - struct rxrpc_net *rxnet; /* Our bits in the network namespace */ |
| 62 | + + struct rxrpc_net *rxnet; /* The network ns in which this resides */ |
| 63 | + struct hlist_node link; |
| 64 | + struct socket *socket; /* my UDP socket */ |
| 65 | + - struct task_struct *io_thread; |
| 66 | + - struct completion io_thread_ready; /* Indication that the I/O thread started */ |
| 67 | + + struct work_struct processor; |
| 68 | + + struct list_head ack_tx_queue; /* List of ACKs that need sending */ |
| 69 | + + spinlock_t ack_tx_lock; /* ACK list lock */ |
| 70 | + struct rxrpc_sock __rcu *service; /* Service(s) listening on this endpoint */ |
| 71 | + struct rw_semaphore defrag_sem; /* control re-enablement of IP DF bit */ |
| 72 | +++<<<<<<< HEAD |
| 73 | + + struct sk_buff_head reject_queue; /* packets awaiting rejection */ |
| 74 | + + struct sk_buff_head event_queue; /* endpoint event packets awaiting processing */ |
| 75 | +++======= |
| 76 | ++ struct sk_buff_head rx_queue; /* Received packets */ |
| 77 | ++ struct list_head conn_attend_q; /* Conns requiring immediate attention */ |
| 78 | ++ struct list_head call_attend_q; /* Calls requiring immediate attention */ |
| 79 | ++ |
| 80 | +++>>>>>>> 0d6bf319bc5a (rxrpc: Move the client conn cache management to the I/O thread) |
| 81 | + struct rb_root client_bundles; /* Client connection bundles by socket params */ |
| 82 | + spinlock_t client_bundles_lock; /* Lock for client_bundles */ |
| 83 | ++ bool kill_all_client_conns; |
| 84 | ++ spinlock_t client_conn_cache_lock; /* Lock for ->*_client_conns */ |
| 85 | ++ struct list_head idle_client_conns; |
| 86 | ++ struct timer_list client_conn_reap_timer; |
| 87 | ++ unsigned long client_conn_flags; |
| 88 | ++ #define RXRPC_CLIENT_CONN_REAP_TIMER 0 /* The client conn reap timer expired */ |
| 89 | ++ |
| 90 | + spinlock_t lock; /* access lock */ |
| 91 | + rwlock_t services_lock; /* lock for services list */ |
| 92 | + int debug_id; /* debug ID for printks */ |
| 93 | +@@@ -860,19 -939,16 +876,25 @@@ static inline bool rxrpc_is_client_call |
| 94 | + extern unsigned int rxrpc_reap_client_connections; |
| 95 | + extern unsigned long rxrpc_conn_idle_client_expiry; |
| 96 | + extern unsigned long rxrpc_conn_idle_client_fast_expiry; |
| 97 | + - |
| 98 | + -void rxrpc_destroy_client_conn_ids(struct rxrpc_local *local); |
| 99 | + -struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *, enum rxrpc_bundle_trace); |
| 100 | + -void rxrpc_put_bundle(struct rxrpc_bundle *, enum rxrpc_bundle_trace); |
| 101 | + -int rxrpc_connect_call(struct rxrpc_call *call, gfp_t gfp); |
| 102 | + +extern struct idr rxrpc_client_conn_ids; |
| 103 | + + |
| 104 | + +void rxrpc_destroy_client_conn_ids(void); |
| 105 | + +struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *); |
| 106 | + +void rxrpc_put_bundle(struct rxrpc_bundle *); |
| 107 | + +int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_call *, |
| 108 | + + struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *, |
| 109 | + + gfp_t); |
| 110 | + void rxrpc_expose_client_call(struct rxrpc_call *); |
| 111 | + void rxrpc_disconnect_client_call(struct rxrpc_bundle *, struct rxrpc_call *); |
| 112 | +++<<<<<<< HEAD |
| 113 | + +void rxrpc_put_client_conn(struct rxrpc_connection *); |
| 114 | + +void rxrpc_discard_expired_client_conns(struct work_struct *); |
| 115 | + +void rxrpc_destroy_all_client_connections(struct rxrpc_net *); |
| 116 | +++======= |
| 117 | ++ void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle); |
| 118 | ++ void rxrpc_put_client_conn(struct rxrpc_connection *, enum rxrpc_conn_trace); |
| 119 | ++ void rxrpc_discard_expired_client_conns(struct rxrpc_local *local); |
| 120 | +++>>>>>>> 0d6bf319bc5a (rxrpc: Move the client conn cache management to the I/O thread) |
| 121 | + void rxrpc_clean_up_local_conns(struct rxrpc_local *); |
| 122 | + |
| 123 | + /* |
| 124 | +diff --cc net/rxrpc/conn_client.c |
| 125 | +index 827c1308297c,ebb43f65ebc5..000000000000 |
| 126 | +--- a/net/rxrpc/conn_client.c |
| 127 | ++++ b/net/rxrpc/conn_client.c |
| 128 | +@@@ -571,7 -578,7 +571,11 @@@ static void rxrpc_activate_one_channel( |
| 129 | + */ |
| 130 | + static void rxrpc_unidle_conn(struct rxrpc_bundle *bundle, struct rxrpc_connection *conn) |
| 131 | + { |
| 132 | +++<<<<<<< HEAD |
| 133 | + + struct rxrpc_net *rxnet = bundle->params.local->rxnet; |
| 134 | +++======= |
| 135 | ++ struct rxrpc_local *local = bundle->local; |
| 136 | +++>>>>>>> 0d6bf319bc5a (rxrpc: Move the client conn cache management to the I/O thread) |
| 137 | + bool drop_ref; |
| 138 | + |
| 139 | + if (!list_empty(&conn->cache_link)) { |
| 140 | +@@@ -581,9 -588,9 +585,9 @@@ |
| 141 | + list_del_init(&conn->cache_link); |
| 142 | + drop_ref = true; |
| 143 | + } |
| 144 | +- spin_unlock(&rxnet->client_conn_cache_lock); |
| 145 | ++ spin_unlock(&local->client_conn_cache_lock); |
| 146 | + if (drop_ref) |
| 147 | + - rxrpc_put_connection(conn, rxrpc_conn_put_unidle); |
| 148 | + + rxrpc_put_connection(conn); |
| 149 | + } |
| 150 | + } |
| 151 | + |
| 152 | +@@@ -700,22 -707,18 +704,31 @@@ out |
| 153 | + * find a connection for a call |
| 154 | + * - called in process context with IRQs enabled |
| 155 | + */ |
| 156 | + -int rxrpc_connect_call(struct rxrpc_call *call, gfp_t gfp) |
| 157 | + +int rxrpc_connect_call(struct rxrpc_sock *rx, |
| 158 | + + struct rxrpc_call *call, |
| 159 | + + struct rxrpc_conn_parameters *cp, |
| 160 | + + struct sockaddr_rxrpc *srx, |
| 161 | + + gfp_t gfp) |
| 162 | + { |
| 163 | + struct rxrpc_bundle *bundle; |
| 164 | +++<<<<<<< HEAD |
| 165 | + + struct rxrpc_net *rxnet = cp->local->rxnet; |
| 166 | +++======= |
| 167 | +++>>>>>>> 0d6bf319bc5a (rxrpc: Move the client conn cache management to the I/O thread) |
| 168 | + int ret = 0; |
| 169 | + |
| 170 | + _enter("{%d,%lx},", call->debug_id, call->user_call_ID); |
| 171 | + |
| 172 | +++<<<<<<< HEAD |
| 173 | + + rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper); |
| 174 | + + |
| 175 | + + bundle = rxrpc_prep_call(rx, call, cp, srx, gfp); |
| 176 | +++======= |
| 177 | ++ rxrpc_get_call(call, rxrpc_call_get_io_thread); |
| 178 | ++ |
| 179 | ++ bundle = rxrpc_prep_call(call, gfp); |
| 180 | +++>>>>>>> 0d6bf319bc5a (rxrpc: Move the client conn cache management to the I/O thread) |
| 181 | + if (IS_ERR(bundle)) { |
| 182 | + - rxrpc_put_call(call, rxrpc_call_get_io_thread); |
| 183 | + ret = PTR_ERR(bundle); |
| 184 | + goto out; |
| 185 | + } |
| 186 | +@@@ -797,7 -801,7 +810,11 @@@ void rxrpc_disconnect_client_call(struc |
| 187 | + { |
| 188 | + struct rxrpc_connection *conn; |
| 189 | + struct rxrpc_channel *chan = NULL; |
| 190 | +++<<<<<<< HEAD |
| 191 | + + struct rxrpc_net *rxnet = bundle->params.local->rxnet; |
| 192 | +++======= |
| 193 | ++ struct rxrpc_local *local = bundle->local; |
| 194 | +++>>>>>>> 0d6bf319bc5a (rxrpc: Move the client conn cache management to the I/O thread) |
| 195 | + unsigned int channel; |
| 196 | + bool may_reuse; |
| 197 | + u32 cid; |
| 198 | +@@@ -887,12 -890,12 +904,19 @@@ |
| 199 | + trace_rxrpc_client(conn, channel, rxrpc_client_to_idle); |
| 200 | + conn->idle_timestamp = jiffies; |
| 201 | + |
| 202 | +++<<<<<<< HEAD |
| 203 | + + rxrpc_get_connection(conn); |
| 204 | + + spin_lock(&rxnet->client_conn_cache_lock); |
| 205 | + + list_move_tail(&conn->cache_link, &rxnet->idle_client_conns); |
| 206 | + + spin_unlock(&rxnet->client_conn_cache_lock); |
| 207 | +- |
| 208 | +- rxrpc_set_client_reap_timer(rxnet); |
| 209 | +++======= |
| 210 | ++ rxrpc_get_connection(conn, rxrpc_conn_get_idle); |
| 211 | ++ spin_lock(&local->client_conn_cache_lock); |
| 212 | ++ list_move_tail(&conn->cache_link, &local->idle_client_conns); |
| 213 | ++ spin_unlock(&local->client_conn_cache_lock); |
| 214 | +++>>>>>>> 0d6bf319bc5a (rxrpc: Move the client conn cache management to the I/O thread) |
| 215 | ++ |
| 216 | ++ rxrpc_set_client_reap_timer(local); |
| 217 | + } |
| 218 | + |
| 219 | + out: |
| 220 | +@@@ -1009,12 -995,6 +1031,15 @@@ void rxrpc_discard_expired_client_conns |
| 221 | + return; |
| 222 | + } |
| 223 | + |
| 224 | +++<<<<<<< HEAD |
| 225 | + + /* Don't double up on the discarding */ |
| 226 | + + if (!spin_trylock(&rxnet->client_conn_discard_lock)) { |
| 227 | + + _leave(" [already]"); |
| 228 | + + return; |
| 229 | + + } |
| 230 | + + |
| 231 | +++======= |
| 232 | +++>>>>>>> 0d6bf319bc5a (rxrpc: Move the client conn cache management to the I/O thread) |
| 233 | + /* We keep an estimate of what the number of conns ought to be after |
| 234 | + * we've discarded some so that we don't overdo the discarding. |
| 235 | + */ |
| 236 | +@@@ -1051,10 -1031,12 +1076,10 @@@ next |
| 237 | + trace_rxrpc_client(conn, -1, rxrpc_client_discard); |
| 238 | + list_del_init(&conn->cache_link); |
| 239 | + |
| 240 | +- spin_unlock(&rxnet->client_conn_cache_lock); |
| 241 | ++ spin_unlock(&local->client_conn_cache_lock); |
| 242 | + |
| 243 | + rxrpc_unbundle_conn(conn); |
| 244 | + - /* Drop the ->cache_link ref */ |
| 245 | + - rxrpc_put_connection(conn, rxrpc_conn_put_discard_idle); |
| 246 | + + rxrpc_put_connection(conn); /* Drop the ->cache_link ref */ |
| 247 | + |
| 248 | + nr_conns--; |
| 249 | + goto next; |
| 250 | +@@@ -1068,32 -1050,11 +1093,36 @@@ not_yet_expired |
| 251 | + * then things get messier. |
| 252 | + */ |
| 253 | + _debug("not yet"); |
| 254 | +- if (!rxnet->kill_all_client_conns) |
| 255 | +- timer_reduce(&rxnet->client_conn_reap_timer, conn_expires_at); |
| 256 | ++ if (!local->kill_all_client_conns) |
| 257 | ++ timer_reduce(&local->client_conn_reap_timer, conn_expires_at); |
| 258 | + |
| 259 | + out: |
| 260 | +++<<<<<<< HEAD |
| 261 | + + spin_unlock(&rxnet->client_conn_cache_lock); |
| 262 | + + spin_unlock(&rxnet->client_conn_discard_lock); |
| 263 | + + _leave(""); |
| 264 | + +} |
| 265 | + + |
| 266 | + +/* |
| 267 | + + * Preemptively destroy all the client connection records rather than waiting |
| 268 | + + * for them to time out |
| 269 | + + */ |
| 270 | + +void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet) |
| 271 | + +{ |
| 272 | + + _enter(""); |
| 273 | + + |
| 274 | + + spin_lock(&rxnet->client_conn_cache_lock); |
| 275 | + + rxnet->kill_all_client_conns = true; |
| 276 | + + spin_unlock(&rxnet->client_conn_cache_lock); |
| 277 | + + |
| 278 | + + del_timer_sync(&rxnet->client_conn_reap_timer); |
| 279 | + + |
| 280 | + + if (!rxrpc_queue_work(&rxnet->client_conn_reaper)) |
| 281 | + + _debug("destroy: queue failed"); |
| 282 | + + |
| 283 | +++======= |
| 284 | ++ spin_unlock(&local->client_conn_cache_lock); |
| 285 | +++>>>>>>> 0d6bf319bc5a (rxrpc: Move the client conn cache management to the I/O thread) |
| 286 | + _leave(""); |
| 287 | + } |
| 288 | + |
| 289 | +@@@ -1108,11 -1068,18 +1136,17 @@@ void rxrpc_clean_up_local_conns(struct |
| 290 | + |
| 291 | + _enter(""); |
| 292 | + |
| 293 | +- spin_lock(&rxnet->client_conn_cache_lock); |
| 294 | ++ spin_lock(&local->client_conn_cache_lock); |
| 295 | ++ local->kill_all_client_conns = true; |
| 296 | ++ spin_unlock(&local->client_conn_cache_lock); |
| 297 | ++ |
| 298 | ++ del_timer_sync(&local->client_conn_reap_timer); |
| 299 | + |
| 300 | +- list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns, |
| 301 | ++ spin_lock(&local->client_conn_cache_lock); |
| 302 | ++ |
| 303 | ++ list_for_each_entry_safe(conn, tmp, &local->idle_client_conns, |
| 304 | + cache_link) { |
| 305 | + - if (conn->local == local) { |
| 306 | + - atomic_dec(&conn->active); |
| 307 | + + if (conn->params.local == local) { |
| 308 | + trace_rxrpc_client(conn, -1, rxrpc_client_discard); |
| 309 | + list_move(&conn->cache_link, &graveyard); |
| 310 | + } |
| 311 | +diff --cc net/rxrpc/local_object.c |
| 312 | +index 846558613c7f,9bc8d08ca12c..000000000000 |
| 313 | +--- a/net/rxrpc/local_object.c |
| 314 | ++++ b/net/rxrpc/local_object.c |
| 315 | +@@@ -81,16 -105,23 +91,30 @@@ static struct rxrpc_local *rxrpc_alloc_ |
| 316 | + if (local) { |
| 317 | + refcount_set(&local->ref, 1); |
| 318 | + atomic_set(&local->active_users, 1); |
| 319 | + - local->net = net; |
| 320 | + - local->rxnet = rxrpc_net(net); |
| 321 | + + local->rxnet = rxnet; |
| 322 | + INIT_HLIST_NODE(&local->link); |
| 323 | + + INIT_WORK(&local->processor, rxrpc_local_processor); |
| 324 | + + INIT_LIST_HEAD(&local->ack_tx_queue); |
| 325 | + + spin_lock_init(&local->ack_tx_lock); |
| 326 | + init_rwsem(&local->defrag_sem); |
| 327 | +++<<<<<<< HEAD |
| 328 | + + skb_queue_head_init(&local->reject_queue); |
| 329 | + + skb_queue_head_init(&local->event_queue); |
| 330 | +++======= |
| 331 | ++ init_completion(&local->io_thread_ready); |
| 332 | ++ skb_queue_head_init(&local->rx_queue); |
| 333 | ++ INIT_LIST_HEAD(&local->conn_attend_q); |
| 334 | ++ INIT_LIST_HEAD(&local->call_attend_q); |
| 335 | ++ |
| 336 | +++>>>>>>> 0d6bf319bc5a (rxrpc: Move the client conn cache management to the I/O thread) |
| 337 | + local->client_bundles = RB_ROOT; |
| 338 | + spin_lock_init(&local->client_bundles_lock); |
| 339 | ++ local->kill_all_client_conns = false; |
| 340 | ++ spin_lock_init(&local->client_conn_cache_lock); |
| 341 | ++ INIT_LIST_HEAD(&local->idle_client_conns); |
| 342 | ++ timer_setup(&local->client_conn_reap_timer, |
| 343 | ++ rxrpc_client_conn_reap_timeout, 0); |
| 344 | ++ |
| 345 | + spin_lock_init(&local->lock); |
| 346 | + rwlock_init(&local->services_lock); |
| 347 | + local->debug_id = atomic_inc_return(&rxrpc_debug_id); |
| 348 | +diff --cc net/rxrpc/net_ns.c |
| 349 | +index 84242c0e467c,a0319c040c25..000000000000 |
| 350 | +--- a/net/rxrpc/net_ns.c |
| 351 | ++++ b/net/rxrpc/net_ns.c |
| 352 | +@@@ -63,14 -54,6 +54,17 @@@ static __net_init int rxrpc_init_net(st |
| 353 | + rxrpc_service_conn_reap_timeout, 0); |
| 354 | + |
| 355 | + atomic_set(&rxnet->nr_client_conns, 0); |
| 356 | +++<<<<<<< HEAD |
| 357 | + + rxnet->kill_all_client_conns = false; |
| 358 | + + spin_lock_init(&rxnet->client_conn_cache_lock); |
| 359 | + + spin_lock_init(&rxnet->client_conn_discard_lock); |
| 360 | + + INIT_LIST_HEAD(&rxnet->idle_client_conns); |
| 361 | + + INIT_WORK(&rxnet->client_conn_reaper, |
| 362 | + + rxrpc_discard_expired_client_conns); |
| 363 | + + timer_setup(&rxnet->client_conn_reap_timer, |
| 364 | + + rxrpc_client_conn_reap_timeout, 0); |
| 365 | +++======= |
| 366 | +++>>>>>>> 0d6bf319bc5a (rxrpc: Move the client conn cache management to the I/O thread) |
| 367 | + |
| 368 | + INIT_HLIST_HEAD(&rxnet->local_endpoints); |
| 369 | + mutex_init(&rxnet->local_mutex); |
| 370 | +* Unmerged path net/rxrpc/io_thread.c |
| 371 | +* Unmerged path net/rxrpc/ar-internal.h |
| 372 | +* Unmerged path net/rxrpc/conn_client.c |
| 373 | +diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c |
| 374 | +index 156bd26daf74..10f581ced60b 100644 |
| 375 | +--- a/net/rxrpc/conn_object.c |
| 376 | ++++ b/net/rxrpc/conn_object.c |
| 377 | +@@ -462,7 +462,6 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet) |
| 378 | + _enter(""); |
| 379 | + |
| 380 | + atomic_dec(&rxnet->nr_conns); |
| 381 | +- rxrpc_destroy_all_client_connections(rxnet); |
| 382 | + |
| 383 | + del_timer_sync(&rxnet->service_conn_reap_timer); |
| 384 | + rxrpc_queue_work(&rxnet->service_conn_reaper); |
| 385 | +* Unmerged path net/rxrpc/io_thread.c |
| 386 | +* Unmerged path net/rxrpc/local_object.c |
| 387 | +* Unmerged path net/rxrpc/net_ns.c |
0 commit comments