@@ -1197,6 +1197,176 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile,
11971197 return 0 ;
11981198}
11991199
/*
 * Operations on hardware-wrapped inline crypto keys that dm forwards to the
 * blk-crypto profiles of the underlying devices.
 */
enum dm_wrappedkey_op {
	DERIVE_SW_SECRET,
	IMPORT_KEY,
	GENERATE_KEY,
	PREPARE_KEY,
};
1206+
/*
 * Arguments for one wrapped-key operation, bundled into a single struct so
 * they fit through the void *data pointer of ->iterate_devices() callbacks.
 *
 * @op selects which member of the union is valid.  @err carries the result
 * across callback invocations: 0 on success, otherwise the last error seen
 * (it is both read and written by dm_wrappedkey_op_callback()).
 */
struct dm_wrappedkey_op_args {
	enum dm_wrappedkey_op op;
	int err;
	union {
		struct {
			const u8 *eph_key;	/* wrapped ephemeral key (input) */
			size_t eph_key_size;
			u8 *sw_secret;		/* derived software secret (output) */
		} derive_sw_secret;
		struct {
			const u8 *raw_key;	/* raw key material (input) */
			size_t raw_key_size;
			u8 *lt_key;		/* long-term wrapped key (output) */
		} import_key;
		struct {
			u8 *lt_key;		/* long-term wrapped key (output) */
		} generate_key;
		struct {
			const u8 *lt_key;	/* long-term wrapped key (input) */
			size_t lt_key_size;
			u8 *eph_key;		/* ephemerally-wrapped key (output) */
		} prepare_key;
	};
};
1231+
/*
 * ->iterate_devices() callback: try to execute the wrapped-key operation
 * described by *data on one underlying device.
 *
 * The outcome is communicated through args->err rather than the return
 * value: if a previous device has already succeeded (args->err == 0) the
 * operation is skipped, otherwise its result replaces args->err.  The
 * callback always returns 0 so that iteration continues past a failing
 * device and the next one gets a chance.
 */
static int dm_wrappedkey_op_callback(struct dm_target *ti, struct dm_dev *dev,
				     sector_t start, sector_t len, void *data)
{
	struct dm_wrappedkey_op_args *args = data;
	struct block_device *bdev = dev->bdev;
	struct blk_crypto_profile *profile =
		bdev_get_queue(bdev)->crypto_profile;
	int err = -EOPNOTSUPP;

	/* A previous device already completed the operation successfully. */
	if (!args->err)
		return 0;

	switch (args->op) {
	case DERIVE_SW_SECRET:
		err = blk_crypto_derive_sw_secret(
			bdev,
			args->derive_sw_secret.eph_key,
			args->derive_sw_secret.eph_key_size,
			args->derive_sw_secret.sw_secret);
		break;
	case IMPORT_KEY:
		err = blk_crypto_import_key(profile,
					    args->import_key.raw_key,
					    args->import_key.raw_key_size,
					    args->import_key.lt_key);
		break;
	case GENERATE_KEY:
		err = blk_crypto_generate_key(profile,
					      args->generate_key.lt_key);
		break;
	case PREPARE_KEY:
		err = blk_crypto_prepare_key(profile,
					     args->prepare_key.lt_key,
					     args->prepare_key.lt_key_size,
					     args->prepare_key.eph_key);
		break;
	}
	args->err = err;

	/* Try another device in case this fails. */
	return 0;
}
1274+
/*
 * Execute a wrapped-key operation on the underlying devices of the mapped
 * device that owns @profile.
 *
 * Returns 0 on success, or a negative errno on failure (-EOPNOTSUPP if no
 * live table exists or no underlying device could handle the operation).
 */
static int dm_exec_wrappedkey_op(struct blk_crypto_profile *profile,
				 struct dm_wrappedkey_op_args *args)
{
	struct mapped_device *md =
		container_of(profile, struct dm_crypto_profile, profile)->md;
	struct dm_target *ti;
	struct dm_table *t;
	int srcu_idx;
	int i;

	args->err = -EOPNOTSUPP;

	t = dm_get_live_table(md, &srcu_idx);
	if (!t)
		goto out;

	/*
	 * blk-crypto currently has no support for multiple incompatible
	 * implementations of wrapped inline crypto keys on a single system.
	 * It was already checked earlier that support for wrapped keys was
	 * declared on all underlying devices.  Thus, all the underlying
	 * devices should support all wrapped key operations and they should
	 * behave identically, i.e. work with the same keys.  So, just
	 * executing the operation on the first device on which it works
	 * suffices for now.
	 */
	for (i = 0; i < t->num_targets; i++) {
		ti = dm_table_get_target(t, i);
		if (!ti->type->iterate_devices)
			continue;
		ti->type->iterate_devices(ti, dm_wrappedkey_op_callback, args);
		if (!args->err)
			break;
	}
out:
	/* Must pair dm_get_live_table() even when it returned NULL. */
	dm_put_live_table(md, srcu_idx);
	return args->err;
}
1312+
1313+ static int dm_derive_sw_secret (struct blk_crypto_profile * profile ,
1314+ const u8 * eph_key , size_t eph_key_size ,
1315+ u8 sw_secret [BLK_CRYPTO_SW_SECRET_SIZE ])
1316+ {
1317+ struct dm_wrappedkey_op_args args = {
1318+ .op = DERIVE_SW_SECRET ,
1319+ .derive_sw_secret = {
1320+ .eph_key = eph_key ,
1321+ .eph_key_size = eph_key_size ,
1322+ .sw_secret = sw_secret ,
1323+ },
1324+ };
1325+ return dm_exec_wrappedkey_op (profile , & args );
1326+ }
1327+
1328+ static int dm_import_key (struct blk_crypto_profile * profile ,
1329+ const u8 * raw_key , size_t raw_key_size ,
1330+ u8 lt_key [BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE ])
1331+ {
1332+ struct dm_wrappedkey_op_args args = {
1333+ .op = IMPORT_KEY ,
1334+ .import_key = {
1335+ .raw_key = raw_key ,
1336+ .raw_key_size = raw_key_size ,
1337+ .lt_key = lt_key ,
1338+ },
1339+ };
1340+ return dm_exec_wrappedkey_op (profile , & args );
1341+ }
1342+
1343+ static int dm_generate_key (struct blk_crypto_profile * profile ,
1344+ u8 lt_key [BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE ])
1345+ {
1346+ struct dm_wrappedkey_op_args args = {
1347+ .op = GENERATE_KEY ,
1348+ .generate_key = {
1349+ .lt_key = lt_key ,
1350+ },
1351+ };
1352+ return dm_exec_wrappedkey_op (profile , & args );
1353+ }
1354+
1355+ static int dm_prepare_key (struct blk_crypto_profile * profile ,
1356+ const u8 * lt_key , size_t lt_key_size ,
1357+ u8 eph_key [BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE ])
1358+ {
1359+ struct dm_wrappedkey_op_args args = {
1360+ .op = PREPARE_KEY ,
1361+ .prepare_key = {
1362+ .lt_key = lt_key ,
1363+ .lt_key_size = lt_key_size ,
1364+ .eph_key = eph_key ,
1365+ },
1366+ };
1367+ return dm_exec_wrappedkey_op (profile , & args );
1368+ }
1369+
12001370static int
12011371device_intersect_crypto_capabilities (struct dm_target * ti , struct dm_dev * dev ,
12021372 sector_t start , sector_t len , void * data )
@@ -1271,6 +1441,13 @@ static int dm_table_construct_crypto_profile(struct dm_table *t)
12711441 profile );
12721442 }
12731443
1444+ if (profile -> key_types_supported & BLK_CRYPTO_KEY_TYPE_HW_WRAPPED ) {
1445+ profile -> ll_ops .derive_sw_secret = dm_derive_sw_secret ;
1446+ profile -> ll_ops .import_key = dm_import_key ;
1447+ profile -> ll_ops .generate_key = dm_generate_key ;
1448+ profile -> ll_ops .prepare_key = dm_prepare_key ;
1449+ }
1450+
12741451 if (t -> md -> queue &&
12751452 !blk_crypto_has_capabilities (profile ,
12761453 t -> md -> queue -> crypto_profile )) {
0 commit comments