@@ -20,12 +20,14 @@
 #include "cifs_unicode.h"
 #include "smb2glob.h"
 #include "dns_resolve.h"
+#include "dfs.h"
 
 #include "dfs_cache.h"
 
-#define CACHE_HTABLE_SIZE 32
-#define CACHE_MAX_ENTRIES 64
-#define CACHE_MIN_TTL 120 /* 2 minutes */
+#define CACHE_HTABLE_SIZE	32
+#define CACHE_MAX_ENTRIES	64
+#define CACHE_MIN_TTL		120 /* 2 minutes */
+#define CACHE_DEFAULT_TTL	300 /* 5 minutes */
 
 #define IS_DFS_INTERLINK(v) (((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER))
 
@@ -50,10 +52,9 @@ struct cache_entry {
 };
 
 static struct kmem_cache *cache_slab __read_mostly;
-static struct workqueue_struct *dfscache_wq __read_mostly;
+struct workqueue_struct *dfscache_wq;
 
-static int cache_ttl;
-static DEFINE_SPINLOCK(cache_ttl_lock);
+atomic_t dfs_cache_ttl;
 
 static struct nls_table *cache_cp;
 
@@ -65,10 +66,6 @@ static atomic_t cache_count;
 static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
 static DECLARE_RWSEM(htable_rw_lock);
 
-static void refresh_cache_worker(struct work_struct *work);
-
-static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
-
 /**
  * dfs_cache_canonical_path - get a canonical DFS path
  *
@@ -290,7 +287,9 @@ int dfs_cache_init(void)
 	int rc;
 	int i;
 
-	dfscache_wq = alloc_workqueue("cifs-dfscache", WQ_FREEZABLE | WQ_UNBOUND, 1);
+	dfscache_wq = alloc_workqueue("cifs-dfscache",
+				      WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM,
+				      0);
 	if (!dfscache_wq)
 		return -ENOMEM;
 
@@ -306,6 +305,7 @@ int dfs_cache_init(void)
 		INIT_HLIST_HEAD(&cache_htable[i]);
 
 	atomic_set(&cache_count, 0);
+	atomic_set(&dfs_cache_ttl, CACHE_DEFAULT_TTL);
 	cache_cp = load_nls("utf8");
 	if (!cache_cp)
 		cache_cp = load_nls_default();
@@ -480,6 +480,7 @@ static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
 	int rc;
 	struct cache_entry *ce;
 	unsigned int hash;
+	int ttl;
 
 	WARN_ON(!rwsem_is_locked(&htable_rw_lock));
 
@@ -496,15 +497,8 @@ static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
 	if (IS_ERR(ce))
 		return ce;
 
-	spin_lock(&cache_ttl_lock);
-	if (!cache_ttl) {
-		cache_ttl = ce->ttl;
-		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
-	} else {
-		cache_ttl = min_t(int, cache_ttl, ce->ttl);
-		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
-	}
-	spin_unlock(&cache_ttl_lock);
+	ttl = min_t(int, atomic_read(&dfs_cache_ttl), ce->ttl);
+	atomic_set(&dfs_cache_ttl, ttl);
 
 	hlist_add_head(&ce->hlist, &cache_htable[hash]);
 	dump_ce(ce);
@@ -616,7 +610,6 @@ static struct cache_entry *lookup_cache_entry(const char *path)
  */
 void dfs_cache_destroy(void)
 {
-	cancel_delayed_work_sync(&refresh_task);
 	unload_nls(cache_cp);
 	flush_cache_ents();
 	kmem_cache_destroy(cache_slab);
@@ -1142,6 +1135,7 @@ static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, c
  * target shares in @refs.
  */
 static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
+					 const char *path,
 					 struct dfs_cache_tgt_list *old_tl,
 					 struct dfs_cache_tgt_list *new_tl)
 {
@@ -1153,22 +1147,39 @@ static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
 		     nit = dfs_cache_get_next_tgt(new_tl, nit)) {
 			if (target_share_equal(server,
 					       dfs_cache_get_tgt_name(oit),
-					       dfs_cache_get_tgt_name(nit)))
+					       dfs_cache_get_tgt_name(nit))) {
+				dfs_cache_noreq_update_tgthint(path, nit);
 				return;
+			}
 		}
 	}
 
 	cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
 	cifs_signal_cifsd_for_reconnect(server, true);
 }
 
+static bool is_ses_good(struct cifs_ses *ses)
+{
+	struct TCP_Server_Info *server = ses->server;
+	struct cifs_tcon *tcon = ses->tcon_ipc;
+	bool ret;
+
+	spin_lock(&ses->ses_lock);
+	spin_lock(&ses->chan_lock);
+	ret = !cifs_chan_needs_reconnect(ses, server) &&
+		ses->ses_status == SES_GOOD &&
+		!tcon->need_reconnect;
+	spin_unlock(&ses->chan_lock);
+	spin_unlock(&ses->ses_lock);
+	return ret;
+}
+
 /* Refresh dfs referral of tcon and mark it for reconnect if needed */
-static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_refresh)
+static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_refresh)
 {
 	struct dfs_cache_tgt_list old_tl = DFS_CACHE_TGT_LIST_INIT(old_tl);
 	struct dfs_cache_tgt_list new_tl = DFS_CACHE_TGT_LIST_INIT(new_tl);
-	struct cifs_ses *ses = CIFS_DFS_ROOT_SES(tcon->ses);
-	struct cifs_tcon *ipc = ses->tcon_ipc;
+	struct TCP_Server_Info *server = ses->server;
 	bool needs_refresh = false;
 	struct cache_entry *ce;
 	unsigned int xid;
@@ -1190,20 +1201,19 @@ static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_r
 		goto out;
 	}
 
-	spin_lock(&ipc->tc_lock);
-	if (ipc->status != TID_GOOD) {
-		spin_unlock(&ipc->tc_lock);
-		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", __func__);
+	ses = CIFS_DFS_ROOT_SES(ses);
+	if (!is_ses_good(ses)) {
+		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
+			 __func__);
 		goto out;
 	}
-	spin_unlock(&ipc->tc_lock);
 
 	ce = cache_refresh_path(xid, ses, path, true);
 	if (!IS_ERR(ce)) {
 		rc = get_targets(ce, &new_tl);
 		up_read(&htable_rw_lock);
 		cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
-		mark_for_reconnect_if_needed(tcon->ses->server, &old_tl, &new_tl);
+		mark_for_reconnect_if_needed(server, path, &old_tl, &new_tl);
 	}
 
 out:
@@ -1216,10 +1226,11 @@ static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_r
 static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
 {
 	struct TCP_Server_Info *server = tcon->ses->server;
+	struct cifs_ses *ses = tcon->ses;
 
 	mutex_lock(&server->refpath_lock);
 	if (server->leaf_fullpath)
-		__refresh_tcon(server->leaf_fullpath + 1, tcon, force_refresh);
+		__refresh_tcon(server->leaf_fullpath + 1, ses, force_refresh);
 	mutex_unlock(&server->refpath_lock);
 	return 0;
 }
@@ -1263,60 +1274,32 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
 	return refresh_tcon(tcon, true);
 }
 
-/*
- * Worker that will refresh DFS cache from all active mounts based on lowest TTL value
- * from a DFS referral.
- */
-static void refresh_cache_worker(struct work_struct *work)
+/* Refresh all DFS referrals related to DFS tcon */
+void dfs_cache_refresh(struct work_struct *work)
 {
 	struct TCP_Server_Info *server;
-	struct cifs_tcon *tcon, *ntcon;
-	struct list_head tcons;
+	struct dfs_root_ses *rses;
+	struct cifs_tcon *tcon;
 	struct cifs_ses *ses;
 
-	INIT_LIST_HEAD(&tcons);
+	tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
+	ses = tcon->ses;
+	server = ses->server;
 
-	spin_lock(&cifs_tcp_ses_lock);
-	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
-		spin_lock(&server->srv_lock);
-		if (!server->leaf_fullpath) {
-			spin_unlock(&server->srv_lock);
-			continue;
-		}
-		spin_unlock(&server->srv_lock);
-
-		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
-			if (ses->tcon_ipc) {
-				ses->ses_count++;
-				list_add_tail(&ses->tcon_ipc->ulist, &tcons);
-			}
-			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
-				if (!tcon->ipc) {
-					tcon->tc_count++;
-					list_add_tail(&tcon->ulist, &tcons);
-				}
-			}
-		}
-	}
-	spin_unlock(&cifs_tcp_ses_lock);
-
-	list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
-		struct TCP_Server_Info *server = tcon->ses->server;
-
-		list_del_init(&tcon->ulist);
+	mutex_lock(&server->refpath_lock);
+	if (server->leaf_fullpath)
+		__refresh_tcon(server->leaf_fullpath + 1, ses, false);
+	mutex_unlock(&server->refpath_lock);
 
+	list_for_each_entry(rses, &tcon->dfs_ses_list, list) {
+		ses = rses->ses;
+		server = ses->server;
 		mutex_lock(&server->refpath_lock);
 		if (server->leaf_fullpath)
-			__refresh_tcon(server->leaf_fullpath + 1, tcon, false);
+			__refresh_tcon(server->leaf_fullpath + 1, ses, false);
 		mutex_unlock(&server->refpath_lock);
-
-		if (tcon->ipc)
-			cifs_put_smb_ses(tcon->ses);
-		else
-			cifs_put_tcon(tcon);
 	}
 
-	spin_lock(&cache_ttl_lock);
-	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
-	spin_unlock(&cache_ttl_lock);
+	queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
+			   atomic_read(&dfs_cache_ttl) * HZ);
 }
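
Aside from the diff itself: a minimal sketch of how the new per-tcon refresh work is presumably driven. The init and teardown call sites are not in this file's hunks, so their placement below is an assumption; only `dfs_cache_refresh()`, `dfscache_wq`, `dfs_cache_ttl` and `tcon->dfs_cache_work` come from the patch, while `INIT_DELAYED_WORK()`, `queue_delayed_work()` and `cancel_delayed_work_sync()` are the standard workqueue helpers.

```c
/* Sketch only -- assumed call sites outside this diff. */

/* When a DFS tcon is set up: arm its refresh work and schedule the first
 * run using the lowest referral TTL tracked in dfs_cache_ttl. */
INIT_DELAYED_WORK(&tcon->dfs_cache_work, dfs_cache_refresh);
queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
		   atomic_read(&dfs_cache_ttl) * HZ);

/* When the tcon is torn down: make sure no refresh is left running. */
cancel_delayed_work_sync(&tcon->dfs_cache_work);
```

With each DFS tcon re-queueing its own work from `dfs_cache_refresh()`, the old global `refresh_task` and the `cache_ttl`/`cache_ttl_lock` pair are no longer needed; the single `dfs_cache_ttl` atomic tracks the minimum TTL across cache entries.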