Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
38 changes: 38 additions & 0 deletions .github/workflows/upstream-commit-check.yml
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,26 @@ jobs:
with:
python-version: '3.x'

- name: Install build dependencies for patchutils
run: |
sudo apt-get update
sudo apt-get install -y build-essential autoconf automake libtool gnulib

- name: Clone and build custom patchutils
run: |
git clone https://github.com/kerneltoast/patchutils
cd patchutils
./bootstrap
./configure
make -j$(nproc)

- name: Download run_interdiff.py
run: |
curl -sL \
https://raw.githubusercontent.com/ctrliq/kernel-src-tree-tools/hackathon-cve-check/run_interdiff.py \
-o run_interdiff.py
chmod +x run_interdiff.py

- name: Run upstream fixes check
id: checkkernel
run: |
Expand All @@ -44,6 +64,15 @@ jobs:
echo "has_findings=true" >> $GITHUB_OUTPUT
fi

- name: Run interdiff check
  id: interdiff
  run: |
    python3 run_interdiff.py --repo . --pr_branch "${{ github.head_ref }}" --base_branch "${{ github.base_ref }}" --markdown --interdiff ./patchutils/src/interdiff | tee interdiff_result.txt
    # Flag differences only when the success message is absent.
    # NOTE: the previous `grep -q -v "<msg>"` succeeded if ANY line did
    # not match the message, so with markdown output it was effectively
    # always true; `! grep -q` tests for the absence of the message.
    if ! grep -q "All backported commits match their upstream counterparts." interdiff_result.txt; then
      echo "has_differences=true" >> $GITHUB_OUTPUT
    fi

- name: Comment on PR if issues found
if: steps.checkkernel.outputs.has_findings == 'true'
env:
Expand All @@ -52,3 +81,12 @@ jobs:
gh pr comment ${{ github.event.pull_request.number }} \
--body "$(cat result.txt)" \
--repo ${{ github.repository }}

- name: Comment on PR if interdiff differences found
if: steps.interdiff.outputs.has_differences == 'true'
env:
GH_TOKEN: ${{ github.token }}
run: |
gh pr comment ${{ github.event.pull_request.number }} \
--body "$(cat interdiff_result.txt)" \
--repo ${{ github.repository }}
3 changes: 2 additions & 1 deletion fs/ext4/ext4.h
Original file line number Diff line number Diff line change
Expand Up @@ -1803,7 +1803,8 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
enum {
EXT4_MF_MNTDIR_SAMPLED,
EXT4_MF_FS_ABORTED, /* Fatal error detected */
EXT4_MF_FC_INELIGIBLE /* Fast commit ineligible */
EXT4_MF_FC_INELIGIBLE, /* Fast commit ineligible */
EXT4_MF_JOURNAL_DESTROY /* Journal is in the process of being destroyed */
};

static inline void ext4_set_mount_flag(struct super_block *sb, int bit)
Expand Down
15 changes: 15 additions & 0 deletions fs/ext4/ext4_jbd2.h
Original file line number Diff line number Diff line change
Expand Up @@ -521,6 +521,21 @@ static inline int ext4_journal_destroy(struct ext4_sb_info *sbi, journal_t *jour
{
int err = 0;

/*
* At this point only two things can be operating on the journal.
* JBD2 thread performing transaction commit and s_sb_upd_work
* issuing sb update through the journal. Once we set
* EXT4_JOURNAL_DESTROY, new ext4_handle_error() calls will not
* queue s_sb_upd_work and ext4_force_commit() makes sure any
* ext4_handle_error() calls from the running transaction commit are
* finished. Hence no new s_sb_upd_work can be queued after we
* flush it here.
*/
ext4_set_mount_flag(sbi->s_sb, EXT4_MF_JOURNAL_DESTROY);

ext4_force_commit(sbi->s_sb);
flush_work(&sbi->s_sb_upd_work);

err = jbd2_journal_destroy(journal);
sbi->s_journal = NULL;

Expand Down
16 changes: 8 additions & 8 deletions fs/ext4/super.c
Original file line number Diff line number Diff line change
Expand Up @@ -668,9 +668,13 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
* In case the fs should keep running, we need to writeout
* superblock through the journal. Due to lock ordering
* constraints, it may not be safe to do it right here so we
* defer superblock flushing to a workqueue.
* defer superblock flushing to a workqueue. We just need to be
* careful when the journal is already shutting down. If we get
* here in that case, just update the sb directly as the last
* transaction won't commit anyway.
*/
if (continue_fs && journal)
if (continue_fs && journal &&
!ext4_test_mount_flag(sb, EXT4_MF_JOURNAL_DESTROY))
schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
else
ext4_commit_super(sb);
Expand Down Expand Up @@ -1203,7 +1207,6 @@ static void ext4_put_super(struct super_block *sb)
ext4_unregister_li_request(sb);
ext4_quota_off_umount(sb);

flush_work(&sbi->s_sb_upd_work);
destroy_workqueue(sbi->rsv_conversion_wq);
ext4_release_orphan_info(sb);

Expand All @@ -1213,7 +1216,8 @@ static void ext4_put_super(struct super_block *sb)
if ((err < 0) && !aborted) {
ext4_abort(sb, -err, "Couldn't clean up the journal");
}
}
} else
flush_work(&sbi->s_sb_upd_work);

ext4_es_unregister_shrinker(sbi);
del_timer_sync(&sbi->s_err_report);
Expand Down Expand Up @@ -4891,8 +4895,6 @@ static int ext4_load_and_init_journal(struct super_block *sb,
return 0;

out:
/* flush s_sb_upd_work before destroying the journal. */
flush_work(&sbi->s_sb_upd_work);
ext4_journal_destroy(sbi, sbi->s_journal);
return -EINVAL;
}
Expand Down Expand Up @@ -5599,8 +5601,6 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
sbi->s_ea_block_cache = NULL;

if (sbi->s_journal) {
/* flush s_sb_upd_work before journal destroy. */
flush_work(&sbi->s_sb_upd_work);
ext4_journal_destroy(sbi, sbi->s_journal);
}
failed_mount3a:
Expand Down