Merge android-4.14.169 (239034f) into msm-4.14

* refs/heads/tmp-239034f:
  Linux 4.14.169
  net/x25: fix nonblocking connect
  netfilter: ipset: use bitmap infrastructure completely
  bitmap: Add bitmap_alloc(), bitmap_zalloc() and bitmap_free()
  md: Avoid namespace collision with bitmap API
  scsi: iscsi: Avoid potential deadlock in iscsi_if_rx func
  media: v4l2-ioctl.c: zero reserved fields for S/TRY_FMT
  libertas: Fix two buffer overflows at parsing bss descriptor
  coresight: tmc-etf: Do not call smp_processor_id from preemptible
  coresight: etb10: Do not call smp_processor_id from preemptible
  sd: Fix REQ_OP_ZONE_REPORT completion handling
  do_last(): fetch directory ->i_mode and ->i_uid before it's too late
  tracing: xen: Ordered comparison of function pointers
  scsi: RDMA/isert: Fix a recently introduced regression related to logout
  hwmon: (nct7802) Fix voltage limits to wrong registers
  Input: sun4i-ts - add a check for devm_thermal_zone_of_sensor_register
  Input: pegasus_notetaker - fix endpoint sanity check
  Input: aiptek - fix endpoint sanity check
  Input: gtco - fix endpoint sanity check
  Input: sur40 - fix interface sanity checks
  Input: pm8xxx-vib - fix handling of separate enable register
  Documentation: Document arm64 kpti control
  mmc: sdhci: fix minimum clock rate for v3 controller
  mmc: tegra: fix SDR50 tuning override
  ARM: 8950/1: ftrace/recordmcount: filter relocation types
  Revert "Input: synaptics-rmi4 - don't increment rmiaddr for SMBus transfers"
  Input: keyspan-remote - fix control-message timeouts
  hwmon: (core) Do not use device managed functions for memory allocations
  hwmon: (core) Fix double-free in __hwmon_device_register()
  hwmon: Deal with errors from the thermal subsystem
  hwmon: (adt7475) Make volt2reg return same reg as reg2volt input
  net: rtnetlink: validate IFLA_MTU attribute in rtnl_create_link()
  tcp_bbr: improve arithmetic division in bbr_update_bw()
  net: usb: lan78xx: Add .ndo_features_check
  net-sysfs: Fix reference count leak
  net-sysfs: Call dev_hold always in rx_queue_add_kobject
  net-sysfs: Call dev_hold always in netdev_queue_add_kobject
  net-sysfs: fix netdev_queue_add_kobject() breakage
  net-sysfs: Fix reference count leak in rx|netdev_queue_add_kobject
  net_sched: fix datalen for ematch
  net, ip_tunnel: fix namespaces move
  net, ip6_tunnel: fix namespaces move
  net: cxgb3_main: Add CAP_NET_ADMIN check to CHELSIO_GET_MEM
  ipv6: sr: remove SKB_GSO_IPXIP6 on End.D* actions
  gtp: make sure only SOCK_DGRAM UDP sockets are accepted
  firestream: fix memory leaks
  can, slip: Protect tty->disc_data in write_wakeup and close with RCU
  UPSTREAM: staging: most: net: fix buffer overflow
  ANDROID: Fixing incremental fs style issues
  ANDROID: Make incfs selftests pass
  ANDROID: Initial commit of Incremental FS
  ANDROID: cuttlefish_defconfig: Enable CONFIG_BTT

 New header file entries are added to .bp files.

Change-Id: I521b976a19c8993b0047ab06e6d42b5107c234a3
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
Commit f34d8ef8e8 by Srinivasarao P <spathi@codeaurora.org>, 2020-06-30 21:41:02 +05:30
78 changed files with 8613 additions and 178 deletions


@ -1867,6 +1867,12 @@
Built with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y,
the default is off.
kpti= [ARM64] Control page table isolation of user
and kernel address spaces.
Default: enabled on cores which need mitigation.
0: force disabled
1: force enabled
kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs.
Default is 0 (don't ignore, but inject #GP)


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 168
SUBLEVEL = 169
EXTRAVERSION =
NAME = Petit Gorille


@ -422,7 +422,6 @@ CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
CONFIG_LIBNVDIMM=y
# CONFIG_ND_BLK is not set
# CONFIG_BTT is not set
CONFIG_ARM_SCPI_PROTOCOL=y
# CONFIG_ARM_SCPI_POWER_DOMAIN is not set
CONFIG_EXT4_FS=y


@ -436,7 +436,6 @@ CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
CONFIG_LIBNVDIMM=y
# CONFIG_ND_BLK is not set
# CONFIG_BTT is not set
# CONFIG_FIRMWARE_MEMMAP is not set
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y


@ -927,6 +927,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
}
if (!to) {
printk ("No more free channels for FS50..\n");
kfree(vcc);
return -EBUSY;
}
vcc->channo = dev->channo;
@ -937,6 +938,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) ||
( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) {
printk ("Channel is in use for FS155.\n");
kfree(vcc);
return -EBUSY;
}
}
@ -950,6 +952,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
tc, sizeof (struct fs_transmit_config));
if (!tc) {
fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n");
kfree(vcc);
return -ENOMEM;
}
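Note: the firestream fix above plugs the leak by adding kfree(vcc) to each early-return path in fs_open(). A common alternative in kernel code is a single goto-based unwind label; the sketch below is a hypothetical illustration of that idiom (struct and helper names are invented, not taken from the driver), not a claim about how the actual patch should have been written.

/*
 * Minimal sketch (not the firestream code) of the goto-unwind idiom
 * that keeps a single kfree() for all error paths. The struct and
 * helpers below are placeholders for illustration only.
 */
#include <linux/errno.h>
#include <linux/slab.h>

struct demo_vcc {
	int channo;
};

static int demo_claim_channel(struct demo_vcc *vcc) { return 0; }
static int demo_alloc_tx_cfg(struct demo_vcc *vcc)  { return -ENOMEM; }

static int demo_open(void)
{
	struct demo_vcc *vcc;
	int err;

	vcc = kzalloc(sizeof(*vcc), GFP_KERNEL);
	if (!vcc)
		return -ENOMEM;

	err = demo_claim_channel(vcc);
	if (err)
		goto err_free;	/* one cleanup point instead of a kfree() before every return */

	err = demo_alloc_tx_cfg(vcc);
	if (err)
		goto err_free;

	return 0;

err_free:
	kfree(vcc);
	return err;
}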


@ -297,9 +297,10 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
long reg;
if (bypass_attn & (1 << channel))
reg = (volt * 1024) / 2250;
reg = DIV_ROUND_CLOSEST(volt * 1024, 2250);
else
reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250);
reg = DIV_ROUND_CLOSEST(volt * r[1] * 1024,
(r[0] + r[1]) * 2250);
return clamp_val(reg, 0, 1023) & (0xff << 2);
}
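To see why the switch to DIV_ROUND_CLOSEST() matters in the adt7475 hunk above (the patch subject says the goal is that volt2reg() returns the same register value that reg2volt() was given), here is a standalone sketch comparing truncating division with round-to-nearest for one sample voltage. The helper mirrors the kernel macro's behaviour for positive operands and exists only for illustration.

#include <stdio.h>

/* Same behaviour as the kernel's DIV_ROUND_CLOSEST() for positive values. */
static long div_round_closest(long x, long divisor)
{
	return (x + divisor / 2) / divisor;
}

int main(void)
{
	long volt = 1100;	/* millivolts, bypassed-attenuator case */

	long truncated = (volt * 1024) / 2250;			/* old code: 500 */
	long rounded = div_round_closest(volt * 1024, 2250);	/* new code: 501 */

	printf("truncated=%ld rounded=%ld\n", truncated, rounded);
	return 0;
}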


@ -51,6 +51,7 @@ struct hwmon_device_attribute {
#define to_hwmon_attr(d) \
container_of(d, struct hwmon_device_attribute, dev_attr)
#define to_dev_attr(a) container_of(a, struct device_attribute, attr)
/*
* Thermal zone information
@ -58,7 +59,7 @@ struct hwmon_device_attribute {
* also provides the sensor index.
*/
struct hwmon_thermal_data {
struct hwmon_device *hwdev; /* Reference to hwmon device */
struct device *dev; /* Reference to hwmon device */
int index; /* sensor index */
};
@ -95,9 +96,27 @@ static const struct attribute_group *hwmon_dev_attr_groups[] = {
NULL
};
static void hwmon_free_attrs(struct attribute **attrs)
{
int i;
for (i = 0; attrs[i]; i++) {
struct device_attribute *dattr = to_dev_attr(attrs[i]);
struct hwmon_device_attribute *hattr = to_hwmon_attr(dattr);
kfree(hattr);
}
kfree(attrs);
}
static void hwmon_dev_release(struct device *dev)
{
kfree(to_hwmon_device(dev));
struct hwmon_device *hwdev = to_hwmon_device(dev);
if (hwdev->group.attrs)
hwmon_free_attrs(hwdev->group.attrs);
kfree(hwdev->groups);
kfree(hwdev);
}
static struct class hwmon_class = {
@ -121,11 +140,11 @@ static DEFINE_IDA(hwmon_ida);
static int hwmon_thermal_get_temp(void *data, int *temp)
{
struct hwmon_thermal_data *tdata = data;
struct hwmon_device *hwdev = tdata->hwdev;
struct hwmon_device *hwdev = to_hwmon_device(tdata->dev);
int ret;
long t;
ret = hwdev->chip->ops->read(&hwdev->dev, hwmon_temp, hwmon_temp_input,
ret = hwdev->chip->ops->read(tdata->dev, hwmon_temp, hwmon_temp_input,
tdata->index, &t);
if (ret < 0)
return ret;
@ -139,26 +158,31 @@ static const struct thermal_zone_of_device_ops hwmon_thermal_ops = {
.get_temp = hwmon_thermal_get_temp,
};
static int hwmon_thermal_add_sensor(struct device *dev,
struct hwmon_device *hwdev, int index)
static int hwmon_thermal_add_sensor(struct device *dev, int index)
{
struct hwmon_thermal_data *tdata;
struct thermal_zone_device *tzd;
tdata = devm_kzalloc(dev, sizeof(*tdata), GFP_KERNEL);
if (!tdata)
return -ENOMEM;
tdata->hwdev = hwdev;
tdata->dev = dev;
tdata->index = index;
devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
&hwmon_thermal_ops);
tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
&hwmon_thermal_ops);
/*
* If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
* so ignore that error but forward any other error.
*/
if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV))
return PTR_ERR(tzd);
return 0;
}
#else
static int hwmon_thermal_add_sensor(struct device *dev,
struct hwmon_device *hwdev, int index)
static int hwmon_thermal_add_sensor(struct device *dev, int index)
{
return 0;
}
@ -235,8 +259,7 @@ static bool is_string_attr(enum hwmon_sensor_types type, u32 attr)
(type == hwmon_fan && attr == hwmon_fan_label);
}
static struct attribute *hwmon_genattr(struct device *dev,
const void *drvdata,
static struct attribute *hwmon_genattr(const void *drvdata,
enum hwmon_sensor_types type,
u32 attr,
int index,
@ -264,7 +287,7 @@ static struct attribute *hwmon_genattr(struct device *dev,
if ((mode & S_IWUGO) && !ops->write)
return ERR_PTR(-EINVAL);
hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
hattr = kzalloc(sizeof(*hattr), GFP_KERNEL);
if (!hattr)
return ERR_PTR(-ENOMEM);
@ -467,8 +490,7 @@ static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
return n;
}
static int hwmon_genattrs(struct device *dev,
const void *drvdata,
static int hwmon_genattrs(const void *drvdata,
struct attribute **attrs,
const struct hwmon_ops *ops,
const struct hwmon_channel_info *info)
@ -494,7 +516,7 @@ static int hwmon_genattrs(struct device *dev,
attr_mask &= ~BIT(attr);
if (attr >= template_size)
return -EINVAL;
a = hwmon_genattr(dev, drvdata, info->type, attr, i,
a = hwmon_genattr(drvdata, info->type, attr, i,
templates[attr], ops);
if (IS_ERR(a)) {
if (PTR_ERR(a) != -ENOENT)
@ -508,8 +530,7 @@ static int hwmon_genattrs(struct device *dev,
}
static struct attribute **
__hwmon_create_attrs(struct device *dev, const void *drvdata,
const struct hwmon_chip_info *chip)
__hwmon_create_attrs(const void *drvdata, const struct hwmon_chip_info *chip)
{
int ret, i, aindex = 0, nattrs = 0;
struct attribute **attrs;
@ -520,15 +541,17 @@ __hwmon_create_attrs(struct device *dev, const void *drvdata,
if (nattrs == 0)
return ERR_PTR(-EINVAL);
attrs = devm_kcalloc(dev, nattrs + 1, sizeof(*attrs), GFP_KERNEL);
attrs = kcalloc(nattrs + 1, sizeof(*attrs), GFP_KERNEL);
if (!attrs)
return ERR_PTR(-ENOMEM);
for (i = 0; chip->info[i]; i++) {
ret = hwmon_genattrs(dev, drvdata, &attrs[aindex], chip->ops,
ret = hwmon_genattrs(drvdata, &attrs[aindex], chip->ops,
chip->info[i]);
if (ret < 0)
if (ret < 0) {
hwmon_free_attrs(attrs);
return ERR_PTR(ret);
}
aindex += ret;
}
@ -570,14 +593,13 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
for (i = 0; groups[i]; i++)
ngroups++;
hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups),
GFP_KERNEL);
hwdev->groups = kcalloc(ngroups, sizeof(*groups), GFP_KERNEL);
if (!hwdev->groups) {
err = -ENOMEM;
goto free_hwmon;
}
attrs = __hwmon_create_attrs(dev, drvdata, chip);
attrs = __hwmon_create_attrs(drvdata, chip);
if (IS_ERR(attrs)) {
err = PTR_ERR(attrs);
goto free_hwmon;
@ -621,8 +643,13 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
if (!chip->ops->is_visible(drvdata, hwmon_temp,
hwmon_temp_input, j))
continue;
if (info[i]->config[j] & HWMON_T_INPUT)
hwmon_thermal_add_sensor(dev, hwdev, j);
if (info[i]->config[j] & HWMON_T_INPUT) {
err = hwmon_thermal_add_sensor(hdev, j);
if (err) {
device_unregister(hdev);
goto ida_remove;
}
}
}
}
}
@ -630,7 +657,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
return hdev;
free_hwmon:
kfree(hwdev);
hwmon_dev_release(hdev);
ida_remove:
ida_simple_remove(&hwmon_ida, id);
return ERR_PTR(err);


@ -32,8 +32,8 @@
static const u8 REG_VOLTAGE[5] = { 0x09, 0x0a, 0x0c, 0x0d, 0x0e };
static const u8 REG_VOLTAGE_LIMIT_LSB[2][5] = {
{ 0x40, 0x00, 0x42, 0x44, 0x46 },
{ 0x3f, 0x00, 0x41, 0x43, 0x45 },
{ 0x46, 0x00, 0x40, 0x42, 0x44 },
{ 0x45, 0x00, 0x3f, 0x41, 0x43 },
};
static const u8 REG_VOLTAGE_LIMIT_MSB[5] = { 0x48, 0x00, 0x47, 0x47, 0x48 };


@ -366,9 +366,7 @@ static void *etb_alloc_buffer(struct coresight_device *csdev,
int node, cpu = event->cpu;
struct cs_buffers *buf;
if (cpu == -1)
cpu = smp_processor_id();
node = cpu_to_node(cpu);
node = (cpu == -1) ? NUMA_NO_NODE : cpu_to_node(cpu);
buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
if (!buf)


@ -392,9 +392,7 @@ static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
int node, cpu = event->cpu;
struct cs_buffers *buf;
if (cpu == -1)
cpu = smp_processor_id();
node = cpu_to_node(cpu);
node = (cpu == -1) ? NUMA_NO_NODE : cpu_to_node(cpu);
/* Allocate memory structure for interaction with Perf */
buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
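Both coresight hunks above drop an smp_processor_id() call, which is not allowed in preemptible context, and pass NUMA_NO_NODE so kzalloc_node() has no node preference when the perf event is not bound to a CPU. For contrast, a hedged sketch of the other legal option, pinning the current CPU around the lookup, is shown below; it is illustrative only and is not what these patches chose.

#include <linux/smp.h>
#include <linux/topology.h>

/*
 * Illustrative alternative only: disable preemption while reading the
 * current CPU, so smp_processor_id() is legal. The merged fix instead
 * passes NUMA_NO_NODE and allocates without a node preference.
 */
static int pick_node(int requested_cpu)
{
	int node;

	if (requested_cpu == -1) {
		int cpu = get_cpu();	/* disables preemption */

		node = cpu_to_node(cpu);
		put_cpu();		/* re-enables preemption */
	} else {
		node = cpu_to_node(requested_cpu);
	}

	return node;
}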


@ -2582,17 +2582,6 @@ isert_wait4logout(struct isert_conn *isert_conn)
}
}
static void
isert_wait4cmds(struct iscsi_conn *conn)
{
isert_info("iscsi_conn %p\n", conn);
if (conn->sess) {
target_sess_cmd_list_set_waiting(conn->sess->se_sess);
target_wait_for_sess_cmds(conn->sess->se_sess);
}
}
/**
* isert_put_unsol_pending_cmds() - Drop commands waiting for
* unsolicitate dataout
@ -2640,7 +2629,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
ib_drain_qp(isert_conn->qp);
isert_put_unsol_pending_cmds(conn);
isert_wait4cmds(conn);
isert_wait4logout(isert_conn);
queue_work(isert_release_wq, &isert_conn->release_work);


@ -344,7 +344,8 @@ static int keyspan_setup(struct usb_device* dev)
int retval = 0;
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
0x11, 0x40, 0x5601, 0x0, NULL, 0, 0);
0x11, 0x40, 0x5601, 0x0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
if (retval) {
dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n",
__func__, retval);
@ -352,7 +353,8 @@ static int keyspan_setup(struct usb_device* dev)
}
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
0x44, 0x40, 0x0, 0x0, NULL, 0, 0);
0x44, 0x40, 0x0, 0x0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
if (retval) {
dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n",
__func__, retval);
@ -360,7 +362,8 @@ static int keyspan_setup(struct usb_device* dev)
}
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
0x22, 0x40, 0x0, 0x0, NULL, 0, 0);
0x22, 0x40, 0x0, 0x0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
if (retval) {
dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n",
__func__, retval);


@ -98,7 +98,7 @@ static int pm8xxx_vib_set(struct pm8xxx_vib *vib, bool on)
if (regs->enable_mask)
rc = regmap_update_bits(vib->regmap, regs->enable_addr,
on ? regs->enable_mask : 0, val);
regs->enable_mask, on ? ~0 : 0);
return rc;
}
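The pm8xxx-vib fix above hinges on the regmap_update_bits(map, reg, mask, val) contract: only the bits set in mask are changed, i.e. new = (old & ~mask) | (val & mask). The old call used "on ? regs->enable_mask : 0" as the mask and the drive level as the value, so the enable bit itself was never driven as intended. The userspace sketch below spells out that read-modify-write; the register value and enable-bit position are made up for illustration.

#include <stdio.h>

/* Illustrates the regmap_update_bits() semantics: only bits set in
 * 'mask' are changed, taking their new value from 'val'. */
static unsigned int update_bits(unsigned int old, unsigned int mask,
				unsigned int val)
{
	return (old & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int reg = 0x03;		/* pretend current register value */
	unsigned int enable_mask = 0x80;	/* hypothetical enable bit */

	/* Buggy pattern (on=true): mask is the enable bit, but the value is
	 * the drive level, which does not contain that bit, so it never gets set. */
	printf("buggy: 0x%02x\n", update_bits(reg, enable_mask, 0x05));	/* 0x03 */

	/* Fixed pattern: mask is the enable bit, value is all-ones (or 0 to disable). */
	printf("fixed: 0x%02x\n", update_bits(reg, enable_mask, ~0u));	/* 0x83 */
	return 0;
}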


@ -166,6 +166,7 @@ static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr,
/* prepare to write next block of bytes */
cur_len -= SMB_MAX_COUNT;
databuff += SMB_MAX_COUNT;
rmiaddr += SMB_MAX_COUNT;
}
exit:
mutex_unlock(&rmi_smb->page_mutex);
@ -217,6 +218,7 @@ static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr,
/* prepare to read next block of bytes */
cur_len -= SMB_MAX_COUNT;
databuff += SMB_MAX_COUNT;
rmiaddr += SMB_MAX_COUNT;
}
retval = 0;


@ -1822,14 +1822,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);
/* Verify that a device really has an endpoint */
if (intf->altsetting[0].desc.bNumEndpoints < 1) {
if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
dev_err(&intf->dev,
"interface has %d endpoints, but must have minimum 1\n",
intf->altsetting[0].desc.bNumEndpoints);
intf->cur_altsetting->desc.bNumEndpoints);
err = -EINVAL;
goto fail3;
}
endpoint = &intf->altsetting[0].endpoint[0].desc;
endpoint = &intf->cur_altsetting->endpoint[0].desc;
/* Go set up our URB, which is called when the tablet receives
* input.


@ -875,18 +875,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
}
/* Sanity check that a device has an endpoint */
if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
if (usbinterface->cur_altsetting->desc.bNumEndpoints < 1) {
dev_err(&usbinterface->dev,
"Invalid number of endpoints\n");
error = -EINVAL;
goto err_free_urb;
}
/*
* The endpoint is always altsetting 0, we know this since we know
* this device only has one interrupt endpoint
*/
endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
endpoint = &usbinterface->cur_altsetting->endpoint[0].desc;
/* Some debug */
dev_dbg(&usbinterface->dev, "gtco # interfaces: %d\n", usbinterface->num_altsetting);
@ -973,7 +969,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
input_dev->dev.parent = &usbinterface->dev;
/* Setup the URB, it will be posted later on open of input device */
endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
endpoint = &usbinterface->cur_altsetting->endpoint[0].desc;
usb_fill_int_urb(gtco->urbinfo,
udev,


@ -260,7 +260,7 @@ static int pegasus_probe(struct usb_interface *intf,
return -ENODEV;
/* Sanity check that the device has an endpoint */
if (intf->altsetting[0].desc.bNumEndpoints < 1) {
if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
dev_err(&intf->dev, "Invalid number of endpoints\n");
return -EINVAL;
}
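The aiptek, gtco and pegasus_notetaker fixes above (and the sur40 one below) all validate endpoints on intf->cur_altsetting, the setting actually in use, instead of altsetting[0]. A hedged sketch of that probe-time pattern follows; the driver shape and the expectation of a single interrupt-IN endpoint are placeholders for illustration, not any specific driver's requirements.

#include <linux/errno.h>
#include <linux/usb.h>

/* Illustrative probe-time sanity check (placeholder driver): validate the
 * *current* altsetting, not altsetting[0], before touching its endpoints. */
static int demo_probe(struct usb_interface *intf,
		      const struct usb_device_id *id)
{
	struct usb_host_interface *alt = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep;

	if (alt->desc.bNumEndpoints < 1)
		return -ENODEV;

	ep = &alt->endpoint[0].desc;
	if (!usb_endpoint_is_int_in(ep))	/* expect an interrupt-IN endpoint */
		return -ENODEV;

	/* ... set up URBs against 'ep' as the real drivers do ... */
	return 0;
}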


@ -246,6 +246,7 @@ static int sun4i_ts_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct device *hwmon;
struct thermal_zone_device *thermal;
int error;
u32 reg;
bool ts_attached;
@ -365,7 +366,10 @@ static int sun4i_ts_probe(struct platform_device *pdev)
if (IS_ERR(hwmon))
return PTR_ERR(hwmon);
devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops);
thermal = devm_thermal_zone_of_sensor_register(ts->dev, 0, ts,
&sun4i_ts_tz_ops);
if (IS_ERR(thermal))
return PTR_ERR(thermal);
writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);


@ -537,7 +537,7 @@ static int sur40_probe(struct usb_interface *interface,
int error;
/* Check if we really have the right interface. */
iface_desc = &interface->altsetting[0];
iface_desc = interface->cur_altsetting;
if (iface_desc->desc.bInterfaceClass != 0xFF)
return -ENODEV;


@ -1729,7 +1729,7 @@ void bitmap_flush(struct mddev *mddev)
/*
* free memory that was allocated
*/
void bitmap_free(struct bitmap *bitmap)
void md_bitmap_free(struct bitmap *bitmap)
{
unsigned long k, pages;
struct bitmap_page *bp;
@ -1763,7 +1763,7 @@ void bitmap_free(struct bitmap *bitmap)
kfree(bp);
kfree(bitmap);
}
EXPORT_SYMBOL(bitmap_free);
EXPORT_SYMBOL(md_bitmap_free);
void bitmap_wait_behind_writes(struct mddev *mddev)
{
@ -1796,7 +1796,7 @@ void bitmap_destroy(struct mddev *mddev)
if (mddev->thread)
mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
bitmap_free(bitmap);
md_bitmap_free(bitmap);
}
/*
@ -1887,7 +1887,7 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot)
return bitmap;
error:
bitmap_free(bitmap);
md_bitmap_free(bitmap);
return ERR_PTR(err);
}
@ -1958,7 +1958,7 @@ struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
rv = bitmap_init_from_disk(bitmap, 0);
if (rv) {
bitmap_free(bitmap);
md_bitmap_free(bitmap);
return ERR_PTR(rv);
}


@ -271,7 +271,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot);
int bitmap_copy_from_slot(struct mddev *mddev, int slot,
sector_t *lo, sector_t *hi, bool clear_bits);
void bitmap_free(struct bitmap *bitmap);
void md_bitmap_free(struct bitmap *bitmap);
void bitmap_wait_behind_writes(struct mddev *mddev);
#endif


@ -1128,7 +1128,7 @@ int cluster_check_sync_size(struct mddev *mddev)
bm_lockres = lockres_init(mddev, str, NULL, 1);
if (!bm_lockres) {
pr_err("md-cluster: Cannot initialize %s\n", str);
bitmap_free(bitmap);
md_bitmap_free(bitmap);
return -1;
}
bm_lockres->flags |= DLM_LKF_NOQUEUE;
@ -1142,11 +1142,11 @@ int cluster_check_sync_size(struct mddev *mddev)
sync_size = sb->sync_size;
else if (sync_size != sb->sync_size) {
kunmap_atomic(sb);
bitmap_free(bitmap);
md_bitmap_free(bitmap);
return -1;
}
kunmap_atomic(sb);
bitmap_free(bitmap);
md_bitmap_free(bitmap);
}
return (my_sync_size == sync_size) ? 0 : -1;


@ -1589,12 +1589,12 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
case V4L2_BUF_TYPE_VBI_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_vbi_cap))
break;
CLEAR_AFTER_FIELD(p, fmt.vbi);
CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
return ops->vidioc_s_fmt_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap))
break;
CLEAR_AFTER_FIELD(p, fmt.sliced);
CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_vid_out))
@ -1617,22 +1617,22 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
case V4L2_BUF_TYPE_VBI_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_vbi_out))
break;
CLEAR_AFTER_FIELD(p, fmt.vbi);
CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
return ops->vidioc_s_fmt_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out))
break;
CLEAR_AFTER_FIELD(p, fmt.sliced);
CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SDR_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_sdr_cap))
break;
CLEAR_AFTER_FIELD(p, fmt.sdr);
CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
return ops->vidioc_s_fmt_sdr_cap(file, fh, arg);
case V4L2_BUF_TYPE_SDR_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_sdr_out))
break;
CLEAR_AFTER_FIELD(p, fmt.sdr);
CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
return ops->vidioc_s_fmt_sdr_out(file, fh, arg);
case V4L2_BUF_TYPE_META_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_meta_cap))
@ -1676,12 +1676,12 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
case V4L2_BUF_TYPE_VBI_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_vbi_cap))
break;
CLEAR_AFTER_FIELD(p, fmt.vbi);
CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
return ops->vidioc_try_fmt_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap))
break;
CLEAR_AFTER_FIELD(p, fmt.sliced);
CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_vid_out))
@ -1704,22 +1704,22 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
case V4L2_BUF_TYPE_VBI_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_vbi_out))
break;
CLEAR_AFTER_FIELD(p, fmt.vbi);
CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
return ops->vidioc_try_fmt_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out))
break;
CLEAR_AFTER_FIELD(p, fmt.sliced);
CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SDR_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_sdr_cap))
break;
CLEAR_AFTER_FIELD(p, fmt.sdr);
CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
return ops->vidioc_try_fmt_sdr_cap(file, fh, arg);
case V4L2_BUF_TYPE_SDR_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_sdr_out))
break;
CLEAR_AFTER_FIELD(p, fmt.sdr);
CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
return ops->vidioc_try_fmt_sdr_out(file, fh, arg);
case V4L2_BUF_TYPE_META_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_meta_cap))
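The v4l2 hunks above rely on CLEAR_AFTER_FIELD(p, field), which in v4l2-ioctl.c is approximately a memset of everything in *p that follows the named member. Pointing it at fmt.vbi stops zeroing at the end of the embedded vbi struct, so its trailing reserved[] words keep whatever userspace passed in; pointing it at the last used field (fmt.vbi.flags, fmt.sliced.io_size, fmt.sdr.buffersize) zeroes those reserved fields too. A toy illustration with placeholder structs, not the real v4l2 layouts, and an approximation of the macro (which, like the kernel's, uses GNU typeof):

#include <stdio.h>
#include <stddef.h>
#include <string.h>

/* Simplified stand-ins for the v4l2 structures (illustration only). */
struct toy_vbi { unsigned int flags; unsigned int reserved[2]; };
struct toy_format { unsigned int type; union { struct toy_vbi vbi; char raw[64]; } fmt; };

/* Approximation of v4l2-ioctl.c's CLEAR_AFTER_FIELD(): zero everything
 * in *(p) that comes after the named member. */
#define CLEAR_AFTER_FIELD(p, field) \
	memset((char *)(p) + offsetof(typeof(*(p)), field) + sizeof((p)->field), 0, \
	       sizeof(*(p)) - offsetof(typeof(*(p)), field) - sizeof((p)->field))

int main(void)
{
	struct toy_format f;

	memset(&f, 0xff, sizeof(f));
	CLEAR_AFTER_FIELD(&f, fmt.vbi);		/* old style: reserved[] keeps 0xff */
	printf("old: reserved[0]=0x%x\n", f.fmt.vbi.reserved[0]);

	memset(&f, 0xff, sizeof(f));
	CLEAR_AFTER_FIELD(&f, fmt.vbi.flags);	/* new style: reserved[] is zeroed */
	printf("new: reserved[0]=0x%x\n", f.fmt.vbi.reserved[0]);
	return 0;
}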


@ -177,7 +177,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50)
if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
}


@ -4572,11 +4572,13 @@ int sdhci_setup_host(struct sdhci_host *host)
if (host->ops->get_min_clock)
mmc->f_min = host->ops->get_min_clock(host);
else if (host->version >= SDHCI_SPEC_300) {
if (host->clk_mul) {
mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
if (host->clk_mul)
max_clk = host->max_clk * host->clk_mul;
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
/*
* Divided Clock Mode minimum clock rate is always less than
* Programmable Clock Mode minimum clock rate.
*/
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
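The new comment in the sdhci hunk can be sanity-checked with quick arithmetic. Assuming SDHCI_MAX_DIV_SPEC_300 is 2046 (the value this sketch uses; quoted from memory of sdhci.h, so treat it as an assumption), divided clock mode can reach max_clk / 2046, while the old code reported (max_clk * clk_mul) / 1024, which is higher whenever clk_mul is at least 1. Example values below are illustrative only.

#include <stdio.h>

/* Assumed value from drivers/mmc/host/sdhci.h; illustration only. */
#define SDHCI_MAX_DIV_SPEC_300	2046

int main(void)
{
	unsigned int max_clk = 208000000;	/* 208 MHz base clock (example) */
	unsigned int clk_mul = 8;		/* example programmable-clock multiplier */

	/* What the old code reported when clk_mul was set: */
	unsigned int prog_min = (max_clk * clk_mul) / 1024;
	/* What the fixed code always reports for SDHCI v3: */
	unsigned int div_min = max_clk / SDHCI_MAX_DIV_SPEC_300;

	printf("programmable-mode min: %u Hz\n", prog_min);	/* 1625000 */
	printf("divided-mode min:      %u Hz\n", div_min);	/* 101661  */
	return 0;
}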


@ -343,9 +343,16 @@ static void slcan_transmit(struct work_struct *work)
*/
static void slcan_write_wakeup(struct tty_struct *tty)
{
struct slcan *sl = tty->disc_data;
struct slcan *sl;
rcu_read_lock();
sl = rcu_dereference(tty->disc_data);
if (!sl)
goto out;
schedule_work(&sl->tx_work);
out:
rcu_read_unlock();
}
/* Send a can_frame to a TTY queue. */
@ -640,10 +647,11 @@ static void slcan_close(struct tty_struct *tty)
return;
spin_lock_bh(&sl->lock);
tty->disc_data = NULL;
rcu_assign_pointer(tty->disc_data, NULL);
sl->tty = NULL;
spin_unlock_bh(&sl->lock);
synchronize_rcu();
flush_work(&sl->tx_work);
/* Flush network side */
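The slcan hunk above (and the matching slip hunk further down) is the standard RCU teardown pattern for a pointer that a lockless wakeup path may still be reading: readers dereference under rcu_read_lock(), while the close path publishes NULL with rcu_assign_pointer() and waits in synchronize_rcu() before the object can go away. A generic, self-contained sketch of that pattern follows; the names are invented, and unlike tty->disc_data the pointer here carries an explicit __rcu annotation.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_state {
	int value;
};

static struct demo_state __rcu *demo_ptr;	/* illustrative shared pointer */

/* Reader side: may run concurrently with teardown. */
static int demo_read(void)
{
	struct demo_state *s;
	int v = -1;

	rcu_read_lock();
	s = rcu_dereference(demo_ptr);
	if (s)
		v = s->value;			/* safe: freeing waits for the grace period */
	rcu_read_unlock();
	return v;
}

/* Writer side: unpublish, wait for readers, then free. */
static void demo_teardown(void)
{
	struct demo_state *s = rcu_dereference_protected(demo_ptr, 1);

	rcu_assign_pointer(demo_ptr, NULL);	/* new readers see NULL */
	synchronize_rcu();			/* old readers have finished */
	kfree(s);
}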


@ -2449,6 +2449,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
if (!is_offload(adapter))
return -EOPNOTSUPP;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (!(adapter->flags & FULL_INIT_DONE))
return -EIO; /* need the memory controllers */
if (copy_from_user(&t, useraddr, sizeof(t)))


@ -807,19 +807,21 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
return NULL;
}
if (sock->sk->sk_protocol != IPPROTO_UDP) {
sk = sock->sk;
if (sk->sk_protocol != IPPROTO_UDP ||
sk->sk_type != SOCK_DGRAM ||
(sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
pr_debug("socket fd=%d not UDP\n", fd);
sk = ERR_PTR(-EINVAL);
goto out_sock;
}
lock_sock(sock->sk);
if (sock->sk->sk_user_data) {
lock_sock(sk);
if (sk->sk_user_data) {
sk = ERR_PTR(-EBUSY);
goto out_rel_sock;
}
sk = sock->sk;
sock_hold(sk);
tuncfg.sk_user_data = gtp;


@ -452,9 +452,16 @@ static void slip_transmit(struct work_struct *work)
*/
static void slip_write_wakeup(struct tty_struct *tty)
{
struct slip *sl = tty->disc_data;
struct slip *sl;
rcu_read_lock();
sl = rcu_dereference(tty->disc_data);
if (!sl)
goto out;
schedule_work(&sl->tx_work);
out:
rcu_read_unlock();
}
static void sl_tx_timeout(struct net_device *dev)
@ -886,10 +893,11 @@ static void slip_close(struct tty_struct *tty)
return;
spin_lock_bh(&sl->lock);
tty->disc_data = NULL;
rcu_assign_pointer(tty->disc_data, NULL);
sl->tty = NULL;
spin_unlock_bh(&sl->lock);
synchronize_rcu();
flush_work(&sl->tx_work);
/* VSV = very important to remove timers */


@ -31,6 +31,7 @@
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
#include <net/vxlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
@ -3523,6 +3524,19 @@ static void lan78xx_tx_timeout(struct net_device *net)
tasklet_schedule(&dev->bh);
}
static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
struct net_device *netdev,
netdev_features_t features)
{
if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
features &= ~NETIF_F_GSO_MASK;
features = vlan_features_check(skb, features);
features = vxlan_features_check(skb, features);
return features;
}
static const struct net_device_ops lan78xx_netdev_ops = {
.ndo_open = lan78xx_open,
.ndo_stop = lan78xx_stop,
@ -3536,6 +3550,7 @@ static const struct net_device_ops lan78xx_netdev_ops = {
.ndo_set_features = lan78xx_set_features,
.ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
.ndo_features_check = lan78xx_features_check,
};
static void lan78xx_stat_monitor(unsigned long param)


@ -273,6 +273,10 @@ add_ie_rates(u8 *tlv, const u8 *ie, int *nrates)
int hw, ap, ap_max = ie[1];
u8 hw_rate;
if (ap_max > MAX_RATES) {
lbs_deb_assoc("invalid rates\n");
return tlv;
}
/* Advance past IE header */
ie += 2;
@ -1720,6 +1724,9 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
struct cmd_ds_802_11_ad_hoc_join cmd;
u8 preamble = RADIO_PREAMBLE_SHORT;
int ret = 0;
int hw, i;
u8 rates_max;
u8 *rates;
/* TODO: set preamble based on scan result */
ret = lbs_set_radio(priv, preamble, 1);
@ -1778,9 +1785,12 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
if (!rates_eid) {
lbs_add_rates(cmd.bss.rates);
} else {
int hw, i;
u8 rates_max = rates_eid[1];
u8 *rates = cmd.bss.rates;
rates_max = rates_eid[1];
if (rates_max > MAX_RATES) {
lbs_deb_join("invalid rates");
goto out;
}
rates = cmd.bss.rates;
for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) {
u8 hw_rate = lbs_rates[hw].bitrate / 5;
for (i = 0; i < rates_max; i++) {


@ -37,6 +37,8 @@
#define ISCSI_TRANSPORT_VERSION "2.0-870"
#define ISCSI_SEND_MAX_ALLOWED 10
static int dbg_session;
module_param_named(debug_session, dbg_session, int,
S_IRUGO | S_IWUSR);
@ -3680,6 +3682,7 @@ iscsi_if_rx(struct sk_buff *skb)
struct nlmsghdr *nlh;
struct iscsi_uevent *ev;
uint32_t group;
int retries = ISCSI_SEND_MAX_ALLOWED;
nlh = nlmsg_hdr(skb);
if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
@ -3710,6 +3713,10 @@ iscsi_if_rx(struct sk_buff *skb)
break;
err = iscsi_if_send_reply(portid, nlh->nlmsg_type,
ev, sizeof(*ev));
if (err == -EAGAIN && --retries < 0) {
printk(KERN_WARNING "Send reply failed, error %d\n", err);
break;
}
} while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
skb_pull(skb, rlen);
}


@ -1906,9 +1906,13 @@ static int sd_done(struct scsi_cmnd *SCpnt)
}
break;
case REQ_OP_ZONE_REPORT:
/* To avoid that the block layer performs an incorrect
* bio_advance() call and restart of the remainder of
* incomplete report zone BIOs, always indicate a full
* completion of REQ_OP_ZONE_REPORT.
*/
if (!result) {
good_bytes = scsi_bufflen(SCpnt)
- scsi_get_resid(SCpnt);
good_bytes = scsi_bufflen(SCpnt);
scsi_set_resid(SCpnt, 0);
} else {
good_bytes = 0;


@ -85,6 +85,11 @@ static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
unsigned int payload_len = skb->len - ETH_HLEN;
unsigned int mdp_len = payload_len + MDP_HDR_LEN;
if (mdp_len < skb->len) {
pr_err("drop: too large packet! (%u)\n", skb->len);
return -EINVAL;
}
if (mbo->buffer_length < mdp_len) {
pr_err("drop: too small buffer! (%d for %d)\n",
mbo->buffer_length, mdp_len);
@ -132,6 +137,11 @@ static int skb_to_mep(const struct sk_buff *skb, struct mbo *mbo)
u8 *buff = mbo->virt_address;
unsigned int mep_len = skb->len + MEP_HDR_LEN;
if (mep_len < skb->len) {
pr_err("drop: too large packet! (%u)\n", skb->len);
return -EINVAL;
}
if (mbo->buffer_length < mep_len) {
pr_err("drop: too small buffer! (%d for %d)\n",
mbo->buffer_length, mep_len);
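The two new checks in the most/net hunk guard against unsigned wrap-around: skb->len plus the MDP/MEP header length can overflow a 32-bit unsigned value and come out smaller than skb->len, which would slip past the buffer-size comparison right below it. A minimal illustration of the idiom, with a made-up header length:

#include <stdio.h>

#define DEMO_HDR_LEN 16u	/* stand-in for MDP_HDR_LEN / MEP_HDR_LEN */

/* Returns 0 if the packet fits, -1 if it must be dropped. */
static int demo_check(unsigned int pkt_len, unsigned int buf_len)
{
	unsigned int total = pkt_len + DEMO_HDR_LEN;

	if (total < pkt_len)	/* wrapped around: packet too large */
		return -1;
	if (buf_len < total)	/* honest size check is now safe */
		return -1;
	return 0;
}

int main(void)
{
	/* A near-UINT_MAX length wraps and would otherwise pass the size check. */
	printf("%d\n", demo_check(0xfffffff8u, 2048));	/* -1: dropped  */
	printf("%d\n", demo_check(1000, 2048));		/*  0: accepted */
	return 0;
}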


@ -4155,9 +4155,6 @@ int iscsit_close_connection(
iscsit_stop_nopin_response_timer(conn);
iscsit_stop_nopin_timer(conn);
if (conn->conn_transport->iscsit_wait_conn)
conn->conn_transport->iscsit_wait_conn(conn);
/*
* During Connection recovery drop unacknowledged out of order
* commands for this connection, and prepare the other commands
@ -4243,6 +4240,9 @@ int iscsit_close_connection(
target_sess_cmd_list_set_waiting(sess->se_sess);
target_wait_for_sess_cmds(sess->se_sess);
if (conn->conn_transport->iscsit_wait_conn)
conn->conn_transport->iscsit_wait_conn(conn);
ahash_request_free(conn->conn_tx_hash);
if (conn->conn_rx_hash) {
struct crypto_ahash *tfm;


@ -106,6 +106,7 @@ source "fs/quota/Kconfig"
source "fs/autofs4/Kconfig"
source "fs/fuse/Kconfig"
source "fs/overlayfs/Kconfig"
source "fs/incfs/Kconfig"
menu "Caches"


@ -111,6 +111,7 @@ obj-$(CONFIG_ADFS_FS) += adfs/
obj-$(CONFIG_FUSE_FS) += fuse/
obj-$(CONFIG_OVERLAY_FS) += overlayfs/
obj-$(CONFIG_ORANGEFS_FS) += orangefs/
obj-$(CONFIG_INCREMENTAL_FS) += incfs/
obj-$(CONFIG_UDF_FS) += udf/
obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs/
obj-$(CONFIG_OMFS_FS) += omfs/

fs/incfs/Kconfig (new file, 19 lines)

@ -0,0 +1,19 @@
config INCREMENTAL_FS
tristate "Incremental file system support"
depends on BLOCK
select DECOMPRESS_LZ4
select CRC32
select CRYPTO
select CRYPTO_RSA
select CRYPTO_SHA256
select X509_CERTIFICATE_PARSER
select ASYMMETRIC_KEY_TYPE
select ASYMMETRIC_PUBLIC_KEY_SUBTYPE
select PKCS7_MESSAGE_PARSER
help
Incremental FS is a read-only virtual file system that facilitates execution
of programs while their binaries are still being lazily downloaded over the
network, USB or pigeon post.
To compile this file system support as a module, choose M here: the
module will be called incrementalfs.

fs/incfs/Makefile (new file, 9 lines)

@ -0,0 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INCREMENTAL_FS) += incrementalfs.o
incrementalfs-y := \
data_mgmt.o \
format.o \
integrity.o \
main.o \
vfs.o

fs/incfs/data_mgmt.c (new file, 1136 lines; diff suppressed because it is too large)

fs/incfs/data_mgmt.h (new file, 339 lines)

@ -0,0 +1,339 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2019 Google LLC
*/
#ifndef _INCFS_DATA_MGMT_H
#define _INCFS_DATA_MGMT_H
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/wait.h>
#include <crypto/hash.h>
#include <uapi/linux/incrementalfs.h>
#include "internal.h"
#define SEGMENTS_PER_FILE 3
struct read_log_record {
u32 block_index : 31;
u32 timed_out : 1;
u64 timestamp_us;
incfs_uuid_t file_id;
} __packed;
struct read_log_state {
/* Next slot in rl_ring_buf to write to. */
u32 next_index;
/* Current number of writer pass over rl_ring_buf */
u32 current_pass_no;
};
/* A ring buffer to save records about data blocks which were recently read. */
struct read_log {
struct read_log_record *rl_ring_buf;
struct read_log_state rl_state;
spinlock_t rl_writer_lock;
int rl_size;
/*
* A queue of waiters who want to be notified about reads.
*/
wait_queue_head_t ml_notif_wq;
};
struct mount_options {
unsigned int read_timeout_ms;
unsigned int readahead_pages;
unsigned int read_log_pages;
unsigned int read_log_wakeup_count;
bool no_backing_file_cache;
bool no_backing_file_readahead;
};
struct mount_info {
struct super_block *mi_sb;
struct path mi_backing_dir_path;
struct dentry *mi_index_dir;
const struct cred *mi_owner;
struct mount_options mi_options;
/* This mutex is to be taken before create, rename, delete */
struct mutex mi_dir_struct_mutex;
/*
* A queue of waiters who want to be notified about new pending reads.
*/
wait_queue_head_t mi_pending_reads_notif_wq;
/*
* Protects:
* - reads_list_head
* - mi_pending_reads_count
* - mi_last_pending_read_number
* - data_file_segment.reads_list_head
*/
struct mutex mi_pending_reads_mutex;
/* List of active pending_read objects */
struct list_head mi_reads_list_head;
/* Total number of items in reads_list_head */
int mi_pending_reads_count;
/*
* Last serial number that was assigned to a pending read.
* 0 means no pending reads have been seen yet.
*/
int mi_last_pending_read_number;
/* Temporary buffer for read logger. */
struct read_log mi_log;
};
struct data_file_block {
loff_t db_backing_file_data_offset;
size_t db_stored_size;
enum incfs_compression_alg db_comp_alg;
};
struct pending_read {
incfs_uuid_t file_id;
s64 timestamp_us;
atomic_t done;
int block_index;
int serial_number;
struct list_head mi_reads_list;
struct list_head segment_reads_list;
};
struct data_file_segment {
wait_queue_head_t new_data_arrival_wq;
/* Protects reads and writes from the blockmap */
/* Good candidate for read/write mutex */
struct mutex blockmap_mutex;
/* List of active pending_read objects belonging to this segment */
/* Protected by mount_info.pending_reads_mutex */
struct list_head reads_list_head;
};
/*
* Extra info associated with a file. Just a few bytes set by a user.
*/
struct file_attr {
loff_t fa_value_offset;
size_t fa_value_size;
u32 fa_crc;
};
struct data_file {
struct backing_file_context *df_backing_file_context;
struct mount_info *df_mount_info;
incfs_uuid_t df_id;
/*
* Array of segments used to reduce lock contention for the file.
* Segment is chosen for a block depends on the block's index.
*/
struct data_file_segment df_segments[SEGMENTS_PER_FILE];
/* Base offset of the first metadata record. */
loff_t df_metadata_off;
/* Base offset of the block map. */
loff_t df_blockmap_off;
/* File size in bytes */
loff_t df_size;
int df_block_count; /* File size in DATA_FILE_BLOCK_SIZE blocks */
struct file_attr n_attr;
struct mtree *df_hash_tree;
struct ondisk_signature *df_signature;
/* True, if file signature has already been validated. */
bool df_signature_validated;
};
struct dir_file {
struct mount_info *mount_info;
struct file *backing_dir;
};
struct inode_info {
struct mount_info *n_mount_info; /* A mount, this file belongs to */
struct inode *n_backing_inode;
struct data_file *n_file;
struct inode n_vfs_inode;
};
struct dentry_info {
struct path backing_path;
};
struct mount_info *incfs_alloc_mount_info(struct super_block *sb,
struct mount_options *options,
struct path *backing_dir_path);
void incfs_free_mount_info(struct mount_info *mi);
struct data_file *incfs_open_data_file(struct mount_info *mi, struct file *bf);
void incfs_free_data_file(struct data_file *df);
int incfs_scan_metadata_chain(struct data_file *df);
struct dir_file *incfs_open_dir_file(struct mount_info *mi, struct file *bf);
void incfs_free_dir_file(struct dir_file *dir);
ssize_t incfs_read_data_file_block(struct mem_range dst, struct data_file *df,
int index, int timeout_ms,
struct mem_range tmp);
int incfs_read_file_signature(struct data_file *df, struct mem_range dst);
int incfs_process_new_data_block(struct data_file *df,
struct incfs_new_data_block *block, u8 *data);
int incfs_process_new_hash_block(struct data_file *df,
struct incfs_new_data_block *block, u8 *data);
bool incfs_fresh_pending_reads_exist(struct mount_info *mi, int last_number);
/*
* Collects pending reads and saves them into the array (reads/reads_size).
* Only reads with serial_number > sn_lowerbound are reported.
* Returns how many reads were saved into the array.
*/
int incfs_collect_pending_reads(struct mount_info *mi, int sn_lowerbound,
struct incfs_pending_read_info *reads,
int reads_size);
int incfs_collect_logged_reads(struct mount_info *mi,
struct read_log_state *start_state,
struct incfs_pending_read_info *reads,
int reads_size);
struct read_log_state incfs_get_log_state(struct mount_info *mi);
int incfs_get_uncollected_logs_count(struct mount_info *mi,
struct read_log_state state);
static inline struct inode_info *get_incfs_node(struct inode *inode)
{
if (!inode)
return NULL;
if (inode->i_sb->s_magic != INCFS_MAGIC_NUMBER) {
/* This inode doesn't belong to us. */
pr_warn_once("incfs: %s on an alien inode.", __func__);
return NULL;
}
return container_of(inode, struct inode_info, n_vfs_inode);
}
static inline struct data_file *get_incfs_data_file(struct file *f)
{
struct inode_info *node = NULL;
if (!f)
return NULL;
if (!S_ISREG(f->f_inode->i_mode))
return NULL;
node = get_incfs_node(f->f_inode);
if (!node)
return NULL;
return node->n_file;
}
static inline struct dir_file *get_incfs_dir_file(struct file *f)
{
if (!f)
return NULL;
if (!S_ISDIR(f->f_inode->i_mode))
return NULL;
return (struct dir_file *)f->private_data;
}
/*
* Make sure that inode_info.n_file is initialized and inode can be used
* for reading and writing data from/to the backing file.
*/
int make_inode_ready_for_data_ops(struct mount_info *mi,
struct inode *inode,
struct file *backing_file);
static inline struct dentry_info *get_incfs_dentry(const struct dentry *d)
{
if (!d)
return NULL;
return (struct dentry_info *)d->d_fsdata;
}
static inline void get_incfs_backing_path(const struct dentry *d,
struct path *path)
{
struct dentry_info *di = get_incfs_dentry(d);
if (!di) {
*path = (struct path) {};
return;
}
*path = di->backing_path;
path_get(path);
}
static inline int get_blocks_count_for_size(u64 size)
{
if (size == 0)
return 0;
return 1 + (size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
}
bool incfs_equal_ranges(struct mem_range lhs, struct mem_range rhs);
#endif /* _INCFS_DATA_MGMT_H */

fs/incfs/format.c (new file, 687 lines)

@ -0,0 +1,687 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2018 Google LLC
*/
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/kernel.h>
#include "format.h"
struct backing_file_context *incfs_alloc_bfc(struct file *backing_file)
{
struct backing_file_context *result = NULL;
result = kzalloc(sizeof(*result), GFP_NOFS);
if (!result)
return ERR_PTR(-ENOMEM);
result->bc_file = get_file(backing_file);
mutex_init(&result->bc_mutex);
return result;
}
void incfs_free_bfc(struct backing_file_context *bfc)
{
if (!bfc)
return;
if (bfc->bc_file)
fput(bfc->bc_file);
mutex_destroy(&bfc->bc_mutex);
kfree(bfc);
}
loff_t incfs_get_end_offset(struct file *f)
{
/*
* This function assumes that file size and the end-offset
* are the same. This is not always true.
*/
return i_size_read(file_inode(f));
}
/*
* Truncate the tail of the file to the given length.
* Used to rollback partially successful multistep writes.
*/
static int truncate_backing_file(struct backing_file_context *bfc,
loff_t new_end)
{
struct inode *inode = NULL;
struct dentry *dentry = NULL;
loff_t old_end = 0;
struct iattr attr;
int result = 0;
if (!bfc)
return -EFAULT;
LOCK_REQUIRED(bfc->bc_mutex);
if (!bfc->bc_file)
return -EFAULT;
old_end = incfs_get_end_offset(bfc->bc_file);
if (old_end == new_end)
return 0;
if (old_end < new_end)
return -EINVAL;
inode = bfc->bc_file->f_inode;
dentry = bfc->bc_file->f_path.dentry;
attr.ia_size = new_end;
attr.ia_valid = ATTR_SIZE;
inode_lock(inode);
result = notify_change(dentry, &attr, NULL);
inode_unlock(inode);
return result;
}
/* Append a given number of zero bytes to the end of the backing file. */
static int append_zeros(struct backing_file_context *bfc, size_t len)
{
loff_t file_size = 0;
loff_t new_last_byte_offset = 0;
int res = 0;
if (!bfc)
return -EFAULT;
if (len == 0)
return 0;
LOCK_REQUIRED(bfc->bc_mutex);
/*
* Allocate only one byte at the new desired end of the file.
* It will increase file size and create a zeroed area of
* a given size.
*/
file_size = incfs_get_end_offset(bfc->bc_file);
new_last_byte_offset = file_size + len - 1;
res = vfs_fallocate(bfc->bc_file, 0, new_last_byte_offset, 1);
if (res)
return res;
res = vfs_fsync_range(bfc->bc_file, file_size, file_size + len, 1);
return res;
}
static int write_to_bf(struct backing_file_context *bfc, const void *buf,
size_t count, loff_t pos, bool sync)
{
ssize_t res = 0;
res = incfs_kwrite(bfc->bc_file, buf, count, pos);
if (res < 0)
return res;
if (res != count)
return -EIO;
if (sync)
return vfs_fsync_range(bfc->bc_file, pos, pos + count, 1);
return 0;
}
static u32 calc_md_crc(struct incfs_md_header *record)
{
u32 result = 0;
__le32 saved_crc = record->h_record_crc;
__le64 saved_md_offset = record->h_next_md_offset;
size_t record_size = min_t(size_t, le16_to_cpu(record->h_record_size),
INCFS_MAX_METADATA_RECORD_SIZE);
/* Zero fields which needs to be excluded from CRC calculation. */
record->h_record_crc = 0;
record->h_next_md_offset = 0;
result = crc32(0, record, record_size);
/* Restore excluded fields. */
record->h_record_crc = saved_crc;
record->h_next_md_offset = saved_md_offset;
return result;
}
/*
* Append a given metadata record to the backing file and update a previous
* record to add the new record the the metadata list.
*/
static int append_md_to_backing_file(struct backing_file_context *bfc,
struct incfs_md_header *record)
{
int result = 0;
loff_t record_offset;
loff_t file_pos;
__le64 new_md_offset;
size_t record_size;
if (!bfc || !record)
return -EFAULT;
if (bfc->bc_last_md_record_offset < 0)
return -EINVAL;
LOCK_REQUIRED(bfc->bc_mutex);
record_size = le16_to_cpu(record->h_record_size);
file_pos = incfs_get_end_offset(bfc->bc_file);
record->h_prev_md_offset = bfc->bc_last_md_record_offset;
record->h_next_md_offset = 0;
record->h_record_crc = cpu_to_le32(calc_md_crc(record));
/* Write the metadata record to the end of the backing file */
record_offset = file_pos;
new_md_offset = cpu_to_le64(record_offset);
result = write_to_bf(bfc, record, record_size, file_pos, true);
if (result)
return result;
/* Update next metadata offset in a previous record or a superblock. */
if (bfc->bc_last_md_record_offset) {
/*
* Find a place in the previous md record where new record's
* offset needs to be saved.
*/
file_pos = bfc->bc_last_md_record_offset +
offsetof(struct incfs_md_header, h_next_md_offset);
} else {
/*
* No metadata yet, file a place to update in the
* file_header.
*/
file_pos = offsetof(struct incfs_file_header,
fh_first_md_offset);
}
result = write_to_bf(bfc, &new_md_offset, sizeof(new_md_offset),
file_pos, true);
if (result)
return result;
bfc->bc_last_md_record_offset = record_offset;
return result;
}
/*
* Reserve 0-filled space for the blockmap body, and append
* incfs_blockmap metadata record pointing to it.
*/
int incfs_write_blockmap_to_backing_file(struct backing_file_context *bfc,
u32 block_count, loff_t *map_base_off)
{
struct incfs_blockmap blockmap = {};
int result = 0;
loff_t file_end = 0;
size_t map_size = block_count * sizeof(struct incfs_blockmap_entry);
if (!bfc)
return -EFAULT;
blockmap.m_header.h_md_entry_type = INCFS_MD_BLOCK_MAP;
blockmap.m_header.h_record_size = cpu_to_le16(sizeof(blockmap));
blockmap.m_header.h_next_md_offset = cpu_to_le64(0);
blockmap.m_block_count = cpu_to_le32(block_count);
LOCK_REQUIRED(bfc->bc_mutex);
/* Reserve 0-filled space for the blockmap body in the backing file. */
file_end = incfs_get_end_offset(bfc->bc_file);
result = append_zeros(bfc, map_size);
if (result)
return result;
/* Write blockmap metadata record pointing to the body written above. */
blockmap.m_base_offset = cpu_to_le64(file_end);
result = append_md_to_backing_file(bfc, &blockmap.m_header);
if (result) {
/* Error, rollback file changes */
truncate_backing_file(bfc, file_end);
} else if (map_base_off) {
*map_base_off = file_end;
}
return result;
}
/*
* Write file attribute data and metadata record to the backing file.
*/
int incfs_write_file_attr_to_backing_file(struct backing_file_context *bfc,
struct mem_range value, struct incfs_file_attr *attr)
{
struct incfs_file_attr file_attr = {};
int result = 0;
u32 crc = 0;
loff_t value_offset = 0;
if (!bfc)
return -EFAULT;
if (value.len > INCFS_MAX_FILE_ATTR_SIZE)
return -ENOSPC;
LOCK_REQUIRED(bfc->bc_mutex);
crc = crc32(0, value.data, value.len);
value_offset = incfs_get_end_offset(bfc->bc_file);
file_attr.fa_header.h_md_entry_type = INCFS_MD_FILE_ATTR;
file_attr.fa_header.h_record_size = cpu_to_le16(sizeof(file_attr));
file_attr.fa_header.h_next_md_offset = cpu_to_le64(0);
file_attr.fa_size = cpu_to_le16((u16)value.len);
file_attr.fa_offset = cpu_to_le64(value_offset);
file_attr.fa_crc = cpu_to_le64(crc);
result = write_to_bf(bfc, value.data, value.len, value_offset, true);
if (result)
return result;
result = append_md_to_backing_file(bfc, &file_attr.fa_header);
if (result) {
/* Error, rollback file changes */
truncate_backing_file(bfc, value_offset);
} else if (attr) {
*attr = file_attr;
}
return result;
}
int incfs_write_signature_to_backing_file(struct backing_file_context *bfc,
u8 hash_alg, u32 tree_size,
struct mem_range root_hash, struct mem_range add_data,
struct mem_range sig)
{
struct incfs_file_signature sg = {};
int result = 0;
loff_t rollback_pos = 0;
loff_t tree_area_pos = 0;
size_t alignment = 0;
if (!bfc)
return -EFAULT;
if (root_hash.len > sizeof(sg.sg_root_hash))
return -E2BIG;
LOCK_REQUIRED(bfc->bc_mutex);
rollback_pos = incfs_get_end_offset(bfc->bc_file);
sg.sg_header.h_md_entry_type = INCFS_MD_SIGNATURE;
sg.sg_header.h_record_size = cpu_to_le16(sizeof(sg));
sg.sg_header.h_next_md_offset = cpu_to_le64(0);
sg.sg_hash_alg = hash_alg;
if (sig.data != NULL && sig.len > 0) {
loff_t pos = incfs_get_end_offset(bfc->bc_file);
sg.sg_sig_size = cpu_to_le32(sig.len);
sg.sg_sig_offset = cpu_to_le64(pos);
result = write_to_bf(bfc, sig.data, sig.len, pos, false);
if (result)
goto err;
}
if (add_data.len > 0) {
loff_t pos = incfs_get_end_offset(bfc->bc_file);
sg.sg_add_data_size = cpu_to_le32(add_data.len);
sg.sg_add_data_offset = cpu_to_le64(pos);
result = write_to_bf(bfc, add_data.data,
add_data.len, pos, false);
if (result)
goto err;
}
tree_area_pos = incfs_get_end_offset(bfc->bc_file);
if (hash_alg && tree_size > 0) {
if (tree_size > 5 * INCFS_DATA_FILE_BLOCK_SIZE) {
/*
* If hash tree is big enough, it makes sense to
* align in the backing file for faster access.
*/
loff_t offset = round_up(tree_area_pos, PAGE_SIZE);
alignment = offset - tree_area_pos;
tree_area_pos = offset;
}
/*
* If root hash is not the only hash in the tree.
* reserve 0-filled space for the tree.
*/
result = append_zeros(bfc, tree_size + alignment);
if (result)
goto err;
sg.sg_hash_tree_size = cpu_to_le32(tree_size);
sg.sg_hash_tree_offset = cpu_to_le64(tree_area_pos);
}
memcpy(sg.sg_root_hash, root_hash.data, root_hash.len);
/* Write a hash tree metadata record pointing to the hash tree above. */
result = append_md_to_backing_file(bfc, &sg.sg_header);
err:
if (result) {
/* Error, rollback file changes */
truncate_backing_file(bfc, rollback_pos);
}
return result;
}
/*
* Write a backing file header
* It should always be called only on empty file.
* incfs_super_block.s_first_md_offset is 0 for now, but will be updated
* once first metadata record is added.
*/
int incfs_write_fh_to_backing_file(struct backing_file_context *bfc,
incfs_uuid_t *uuid, u64 file_size)
{
struct incfs_file_header fh = {};
loff_t file_pos = 0;
if (!bfc)
return -EFAULT;
fh.fh_magic = cpu_to_le64(INCFS_MAGIC_NUMBER);
fh.fh_version = cpu_to_le64(INCFS_FORMAT_CURRENT_VER);
fh.fh_header_size = cpu_to_le16(sizeof(fh));
fh.fh_first_md_offset = cpu_to_le64(0);
fh.fh_data_block_size = cpu_to_le16(INCFS_DATA_FILE_BLOCK_SIZE);
fh.fh_file_size = cpu_to_le64(file_size);
fh.fh_uuid = *uuid;
LOCK_REQUIRED(bfc->bc_mutex);
file_pos = incfs_get_end_offset(bfc->bc_file);
if (file_pos != 0)
return -EEXIST;
return write_to_bf(bfc, &fh, sizeof(fh), file_pos, true);
}
/* Write a given data block and update file's blockmap to point it. */
int incfs_write_data_block_to_backing_file(struct backing_file_context *bfc,
struct mem_range block, int block_index,
loff_t bm_base_off, u16 flags)
{
struct incfs_blockmap_entry bm_entry = {};
int result = 0;
loff_t data_offset = 0;
loff_t bm_entry_off =
bm_base_off + sizeof(struct incfs_blockmap_entry) * block_index;
if (!bfc)
return -EFAULT;
if (block.len >= (1 << 16) || block_index < 0)
return -EINVAL;
LOCK_REQUIRED(bfc->bc_mutex);
data_offset = incfs_get_end_offset(bfc->bc_file);
if (data_offset <= bm_entry_off) {
/* Blockmap entry is beyond the file's end. It is not normal. */
return -EINVAL;
}
/* Write the block data at the end of the backing file. */
result = write_to_bf(bfc, block.data, block.len, data_offset, false);
if (result)
return result;
/* Update the blockmap to point to the newly written data. */
bm_entry.me_data_offset_lo = cpu_to_le32((u32)data_offset);
bm_entry.me_data_offset_hi = cpu_to_le16((u16)(data_offset >> 32));
bm_entry.me_data_size = cpu_to_le16((u16)block.len);
bm_entry.me_flags = cpu_to_le16(flags);
result = write_to_bf(bfc, &bm_entry, sizeof(bm_entry),
bm_entry_off, false);
return result;
}
int incfs_write_hash_block_to_backing_file(struct backing_file_context *bfc,
struct mem_range block,
int block_index, loff_t hash_area_off)
{
loff_t data_offset = 0;
loff_t file_end = 0;
if (!bfc)
return -EFAULT;
LOCK_REQUIRED(bfc->bc_mutex);
data_offset = hash_area_off + block_index * INCFS_DATA_FILE_BLOCK_SIZE;
file_end = incfs_get_end_offset(bfc->bc_file);
if (data_offset + block.len > file_end) {
/* Block is located beyond the file's end. It is not normal. */
return -EINVAL;
}
return write_to_bf(bfc, block.data, block.len, data_offset, false);
}
/* Initialize a new image in a given backing file. */
int incfs_make_empty_backing_file(struct backing_file_context *bfc,
incfs_uuid_t *uuid, u64 file_size)
{
int result = 0;
if (!bfc || !bfc->bc_file)
return -EFAULT;
result = mutex_lock_interruptible(&bfc->bc_mutex);
if (result)
goto out;
result = truncate_backing_file(bfc, 0);
if (result)
goto out;
result = incfs_write_fh_to_backing_file(bfc, uuid, file_size);
out:
mutex_unlock(&bfc->bc_mutex);
return result;
}
int incfs_read_blockmap_entry(struct backing_file_context *bfc, int block_index,
loff_t bm_base_off,
struct incfs_blockmap_entry *bm_entry)
{
return incfs_read_blockmap_entries(bfc, bm_entry, block_index, 1,
bm_base_off);
}
int incfs_read_blockmap_entries(struct backing_file_context *bfc,
struct incfs_blockmap_entry *entries,
int start_index, int blocks_number,
loff_t bm_base_off)
{
loff_t bm_entry_off =
bm_base_off + sizeof(struct incfs_blockmap_entry) * start_index;
const size_t bytes_to_read = sizeof(struct incfs_blockmap_entry)
* blocks_number;
int result = 0;
if (!bfc || !entries)
return -EFAULT;
if (start_index < 0 || bm_base_off <= 0)
return -ENODATA;
result = incfs_kread(bfc->bc_file, entries, bytes_to_read,
bm_entry_off);
if (result < 0)
return result;
if (result < bytes_to_read)
return -EIO;
return 0;
}
int incfs_read_file_header(struct backing_file_context *bfc,
loff_t *first_md_off, incfs_uuid_t *uuid,
u64 *file_size)
{
ssize_t bytes_read = 0;
struct incfs_file_header fh = {};
if (!bfc || !first_md_off)
return -EFAULT;
LOCK_REQUIRED(bfc->bc_mutex);
bytes_read = incfs_kread(bfc->bc_file, &fh, sizeof(fh), 0);
if (bytes_read < 0)
return bytes_read;
if (bytes_read < sizeof(fh))
return -EBADMSG;
if (le64_to_cpu(fh.fh_magic) != INCFS_MAGIC_NUMBER)
return -EILSEQ;
if (le64_to_cpu(fh.fh_version) > INCFS_FORMAT_CURRENT_VER)
return -EILSEQ;
if (le16_to_cpu(fh.fh_data_block_size) != INCFS_DATA_FILE_BLOCK_SIZE)
return -EILSEQ;
if (le16_to_cpu(fh.fh_header_size) != sizeof(fh))
return -EILSEQ;
if (first_md_off)
*first_md_off = le64_to_cpu(fh.fh_first_md_offset);
if (uuid)
*uuid = fh.fh_uuid;
if (file_size)
*file_size = le64_to_cpu(fh.fh_file_size);
return 0;
}
/*
* Read through metadata records from the backing file one by one
* and call provided metadata handlers.
*/
int incfs_read_next_metadata_record(struct backing_file_context *bfc,
struct metadata_handler *handler)
{
const ssize_t max_md_size = INCFS_MAX_METADATA_RECORD_SIZE;
ssize_t bytes_read = 0;
size_t md_record_size = 0;
loff_t next_record = 0;
loff_t prev_record = 0;
int res = 0;
struct incfs_md_header *md_hdr = NULL;
if (!bfc || !handler)
return -EFAULT;
LOCK_REQUIRED(bfc->bc_mutex);
if (handler->md_record_offset == 0)
return -EPERM;
memset(&handler->md_buffer, 0, max_md_size);
bytes_read = incfs_kread(bfc->bc_file, &handler->md_buffer,
max_md_size, handler->md_record_offset);
if (bytes_read < 0)
return bytes_read;
if (bytes_read < sizeof(*md_hdr))
return -EBADMSG;
md_hdr = &handler->md_buffer.md_header;
next_record = le64_to_cpu(md_hdr->h_next_md_offset);
prev_record = le64_to_cpu(md_hdr->h_prev_md_offset);
md_record_size = le16_to_cpu(md_hdr->h_record_size);
if (md_record_size > max_md_size) {
pr_warn("incfs: The record is too large. Size: %ld",
md_record_size);
return -EBADMSG;
}
if (bytes_read < md_record_size) {
pr_warn("incfs: The record hasn't been fully read.");
return -EBADMSG;
}
if (next_record <= handler->md_record_offset && next_record != 0) {
pr_warn("incfs: Next record (%lld) points back in file.",
next_record);
return -EBADMSG;
}
if (prev_record != handler->md_prev_record_offset) {
pr_warn("incfs: Metadata chain has been corrupted.");
return -EBADMSG;
}
if (le32_to_cpu(md_hdr->h_record_crc) != calc_md_crc(md_hdr)) {
pr_warn("incfs: Metadata CRC mismatch.");
return -EBADMSG;
}
switch (md_hdr->h_md_entry_type) {
case INCFS_MD_NONE:
break;
case INCFS_MD_BLOCK_MAP:
if (handler->handle_blockmap)
res = handler->handle_blockmap(
&handler->md_buffer.blockmap, handler);
break;
case INCFS_MD_FILE_ATTR:
if (handler->handle_file_attr)
res = handler->handle_file_attr(
&handler->md_buffer.file_attr, handler);
break;
case INCFS_MD_SIGNATURE:
if (handler->handle_signature)
res = handler->handle_signature(
&handler->md_buffer.signature, handler);
break;
default:
res = -ENOTSUPP;
break;
}
if (!res) {
if (next_record == 0) {
/*
* Zero offset for the next record means that the last
* metadata record has just been processed.
*/
bfc->bc_last_md_record_offset =
handler->md_record_offset;
}
handler->md_prev_record_offset = handler->md_record_offset;
handler->md_record_offset = next_record;
}
return res;
}
ssize_t incfs_kread(struct file *f, void *buf, size_t size, loff_t pos)
{
return kernel_read(f, buf, size, &pos);
}
ssize_t incfs_kwrite(struct file *f, const void *buf, size_t size, loff_t pos)
{
return kernel_write(f, buf, size, &pos);
}

fs/incfs/format.h (new file, 349 lines)

@ -0,0 +1,349 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2018 Google LLC
*/
/*
* Overview
* --------
* The backbone of the incremental-fs ondisk format is an append only linked
* list of metadata blocks. Each metadata block contains an offset of the next
* one. These blocks describe files and directories on the
* file system. They also represent actions of adding and removing file names
* (hard links).
*
* Every time incremental-fs instance is mounted, it reads through this list
* to recreate filesystem's state in memory. An offset of the first record in
* the metadata list is stored in the superblock at the beginning of the backing
* file.
*
* Most of the backing file is taken by data areas and blockmaps.
* Since data blocks can be compressed and have different sizes,
* a single per-file data area can't be pre-allocated. That's why blockmaps are
* needed in order to find the location and size of each data block in
* the backing file. Each time a file is created, a corresponding block map is
* allocated to store future offsets of data blocks.
*
* Whenever a data block is given to incremental-fs by a data loader:
* - A data area with the given block is appended to the end of
* the backing file.
* - A record in the blockmap for the given block index is updated to reflect
* its location, size, and compression algorithm.
* Metadata records
* ----------------
* incfs_blockmap - metadata record that specifies size and location
* of a blockmap area for a given file. This area
* contains an array of incfs_blockmap_entry-s.
* incfs_file_signature - metadata record that specifies where file signature
* and its hash tree can be found in the backing file.
*
* incfs_file_attr - metadata record that specifies where additional file
* attributes blob can be found.
*
* Metadata header
* ---------------
* incfs_md_header - header of a metadata record. It is always embedded
* in other structures and serves the purpose of metadata
* bookkeeping.
*
* +-----------------------------------------------+ ^
* | incfs_md_header | |
* | 1. type of body(BLOCKMAP, FILE_ATTR..) | |
* | 2. size of the whole record header + body | |
* | 3. CRC the whole record header + body | |
* | 4. offset of the previous md record |]------+
* | 5. offset of the next md record (md link) |]---+
* +-----------------------------------------------+ |
* | Metadata record body with useful data | |
* +-----------------------------------------------+ |
* +--->
*
* Other ondisk structures
* -----------------------
* incfs_super_block - backing file header
* incfs_blockmap_entry - a record in a blockmap area that describes size
* and location of a data block.
* Data blocks don't have any particular structure; they are written to the
* backing file in a raw form as they come from a data loader.
*
* Backing file layout
* -------------------
*
*
* +-------------------------------------------+
* | incfs_super_block |]---+
* +-------------------------------------------+ |
* | metadata |<---+
* | incfs_file_signature |]---+
* +-------------------------------------------+ |
* ......................... |
* +-------------------------------------------+ | metadata
* +------->| blockmap area | | list links
* | | [incfs_blockmap_entry] | |
* | | [incfs_blockmap_entry] | |
* | | [incfs_blockmap_entry] | |
* | +--[| [incfs_blockmap_entry] | |
* | | | [incfs_blockmap_entry] | |
* | | | [incfs_blockmap_entry] | |
* | | +-------------------------------------------+ |
* | | ......................... |
* | | +-------------------------------------------+ |
* | | | metadata |<---+
* +----|--[| incfs_blockmap |]---+
* | +-------------------------------------------+ |
* | ......................... |
* | +-------------------------------------------+ |
* +-->| data block | |
* +-------------------------------------------+ |
* ......................... |
* +-------------------------------------------+ |
* | metadata |<---+
* | incfs_file_attr |
* +-------------------------------------------+
*/
#ifndef _INCFS_FORMAT_H
#define _INCFS_FORMAT_H
#include <linux/types.h>
#include <linux/kernel.h>
#include <uapi/linux/incrementalfs.h>
#include "internal.h"
#define INCFS_MAX_NAME_LEN 255
#define INCFS_FORMAT_V1 1
#define INCFS_FORMAT_CURRENT_VER INCFS_FORMAT_V1
enum incfs_metadata_type {
INCFS_MD_NONE = 0,
INCFS_MD_BLOCK_MAP = 1,
INCFS_MD_FILE_ATTR = 2,
INCFS_MD_SIGNATURE = 3
};
/* Header included at the beginning of all metadata records on the disk. */
struct incfs_md_header {
__u8 h_md_entry_type;
/*
* Size of the metadata record.
* (e.g. inode, dir entry etc) not just this struct.
*/
__le16 h_record_size;
/*
* CRC32 of the metadata record.
* (e.g. inode, dir entry etc) not just this struct.
*/
__le32 h_record_crc;
/* Offset of the next metadata entry if any */
__le64 h_next_md_offset;
/* Offset of the previous metadata entry if any */
__le64 h_prev_md_offset;
} __packed;
/* Backing file header */
struct incfs_file_header {
/* Magic number: INCFS_MAGIC_NUMBER */
__le64 fh_magic;
/* Format version: INCFS_FORMAT_CURRENT_VER */
__le64 fh_version;
/* sizeof(incfs_file_header) */
__le16 fh_header_size;
/* INCFS_DATA_FILE_BLOCK_SIZE */
__le16 fh_data_block_size;
/* Padding, also reserved for future use. */
__le32 fh_dummy;
/* Offset of the first metadata record */
__le64 fh_first_md_offset;
/*
* Put file specific information after this point
*/
/* Full size of the file's content */
__le64 fh_file_size;
/* File uuid */
incfs_uuid_t fh_uuid;
} __packed;
enum incfs_block_map_entry_flags {
INCFS_BLOCK_COMPRESSED_LZ4 = (1 << 0),
};
/* Block map entry pointing to an actual location of the data block. */
struct incfs_blockmap_entry {
/* Offset of the actual data block. Lower 32 bits */
__le32 me_data_offset_lo;
/* Offset of the actual data block. Higher 16 bits */
__le16 me_data_offset_hi;
/* How many bytes the data actually occupies in the backing file */
__le16 me_data_size;
/* Block flags from incfs_block_map_entry_flags */
__le16 me_flags;
} __packed;
/* Metadata record for locations of file blocks. Type = INCFS_MD_BLOCK_MAP */
struct incfs_blockmap {
struct incfs_md_header m_header;
/* Base offset of the array of incfs_blockmap_entry */
__le64 m_base_offset;
/* Size of the map entry array in blocks */
__le32 m_block_count;
} __packed;
/* Metadata record for file attribute. Type = INCFS_MD_FILE_ATTR */
struct incfs_file_attr {
struct incfs_md_header fa_header;
__le64 fa_offset;
__le16 fa_size;
__le32 fa_crc;
} __packed;
/* Metadata record for file signature. Type = INCFS_MD_SIGNATURE */
struct incfs_file_signature {
struct incfs_md_header sg_header;
__u8 sg_hash_alg; /* Value from incfs_hash_tree_algorithm */
__le32 sg_hash_tree_size; /* The size of the hash tree. */
__le64 sg_hash_tree_offset; /* Hash tree offset in the backing file */
__u8 sg_root_hash[INCFS_MAX_HASH_SIZE];
__le32 sg_sig_size; /* The size of the pkcs7 signature. */
__le64 sg_sig_offset; /* pkcs7 signature's offset in the backing file */
__le32 sg_add_data_size; /* The size of the additional data. */
__le64 sg_add_data_offset; /* Additional data's offset */
} __packed;
/* State of the backing file. */
struct backing_file_context {
/* Protects writes to bc_file */
struct mutex bc_mutex;
/* File object to read data from */
struct file *bc_file;
/*
* Offset of the last known metadata record in the backing file.
* 0 means there are no metadata records.
*/
loff_t bc_last_md_record_offset;
};
/* Backing file locations of things required for signature validation. */
struct ondisk_signature {
loff_t add_data_offset; /* Additional data's offset */
loff_t sig_offset; /* pkcs7 signature's offset in the backing file */
loff_t mtree_offset; /* Backing file offset of the hash tree. */
u32 add_data_size; /* The size of the additional data. */
u32 sig_size; /* The size of the pkcs7 signature. */
u32 mtree_size; /* The size of the hash tree. */
};
struct metadata_handler {
loff_t md_record_offset;
loff_t md_prev_record_offset;
void *context;
union {
struct incfs_md_header md_header;
struct incfs_blockmap blockmap;
struct incfs_file_attr file_attr;
struct incfs_file_signature signature;
} md_buffer;
int (*handle_blockmap)(struct incfs_blockmap *bm,
struct metadata_handler *handler);
int (*handle_file_attr)(struct incfs_file_attr *fa,
struct metadata_handler *handler);
int (*handle_signature)(struct incfs_file_signature *sig,
struct metadata_handler *handler);
};
#define INCFS_MAX_METADATA_RECORD_SIZE \
FIELD_SIZEOF(struct metadata_handler, md_buffer)
loff_t incfs_get_end_offset(struct file *f);
/* Backing file context management */
struct backing_file_context *incfs_alloc_bfc(struct file *backing_file);
void incfs_free_bfc(struct backing_file_context *bfc);
/* Writing stuff */
int incfs_write_blockmap_to_backing_file(struct backing_file_context *bfc,
u32 block_count, loff_t *map_base_off);
int incfs_write_fh_to_backing_file(struct backing_file_context *bfc,
incfs_uuid_t *uuid, u64 file_size);
int incfs_write_data_block_to_backing_file(struct backing_file_context *bfc,
struct mem_range block,
int block_index, loff_t bm_base_off,
u16 flags);
int incfs_write_hash_block_to_backing_file(struct backing_file_context *bfc,
struct mem_range block,
int block_index, loff_t hash_area_off);
int incfs_write_file_attr_to_backing_file(struct backing_file_context *bfc,
struct mem_range value, struct incfs_file_attr *attr);
int incfs_write_signature_to_backing_file(struct backing_file_context *bfc,
u8 hash_alg, u32 tree_size,
struct mem_range root_hash, struct mem_range add_data,
struct mem_range sig);
int incfs_make_empty_backing_file(struct backing_file_context *bfc,
incfs_uuid_t *uuid, u64 file_size);
/* Reading stuff */
int incfs_read_file_header(struct backing_file_context *bfc,
loff_t *first_md_off, incfs_uuid_t *uuid,
u64 *file_size);
int incfs_read_blockmap_entry(struct backing_file_context *bfc, int block_index,
loff_t bm_base_off,
struct incfs_blockmap_entry *bm_entry);
int incfs_read_blockmap_entries(struct backing_file_context *bfc,
struct incfs_blockmap_entry *entries,
int start_index, int blocks_number,
loff_t bm_base_off);
int incfs_read_next_metadata_record(struct backing_file_context *bfc,
struct metadata_handler *handler);
ssize_t incfs_kread(struct file *f, void *buf, size_t size, loff_t pos);
ssize_t incfs_kwrite(struct file *f, const void *buf, size_t size, loff_t pos);
#endif /* _INCFS_FORMAT_H */
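
One detail of the on-disk structures above worth calling out: struct incfs_blockmap_entry splits the backing-file offset of a data block into a 32-bit low part and a 16-bit high part, i.e. a 48-bit offset. A hypothetical helper (not part of this patch) that reassembles it could look like this:

	/* Hypothetical helper: reassemble the 48-bit backing-file offset stored
	 * in an on-disk blockmap entry.
	 */
	static inline loff_t blockmap_entry_data_offset(
				const struct incfs_blockmap_entry *e)
	{
		return (loff_t)le32_to_cpu(e->me_data_offset_lo) |
		       ((loff_t)le16_to_cpu(e->me_data_offset_hi) << 32);
	}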

213
fs/incfs/integrity.c Normal file
View File

@ -0,0 +1,213 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 Google LLC
*/
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/version.h>
#include <crypto/pkcs7.h>
#include "integrity.h"
int incfs_validate_pkcs7_signature(struct mem_range pkcs7_blob,
struct mem_range root_hash, struct mem_range add_data)
{
struct pkcs7_message *pkcs7 = NULL;
const void *data = NULL;
size_t data_len = 0;
const char *p;
int err;
pkcs7 = pkcs7_parse_message(pkcs7_blob.data, pkcs7_blob.len);
if (IS_ERR(pkcs7)) {
pr_debug("PKCS#7 parsing error. ptr=%p size=%ld err=%ld\n",
pkcs7_blob.data, pkcs7_blob.len, -PTR_ERR(pkcs7));
return PTR_ERR(pkcs7);
}
err = pkcs7_get_content_data(pkcs7, &data, &data_len, NULL);
if (err || data_len == 0 || data == NULL) {
pr_debug("PKCS#7 message does not contain data\n");
err = -EBADMSG;
goto out;
}
if (root_hash.len == 0) {
pr_debug("Root hash is empty.\n");
err = -EBADMSG;
goto out;
}
if (data_len != root_hash.len + add_data.len) {
pr_debug("PKCS#7 data size doesn't match arguments.\n");
err = -EKEYREJECTED;
goto out;
}
p = data;
if (memcmp(p, root_hash.data, root_hash.len) != 0) {
pr_debug("Root hash mismatch.\n");
err = -EKEYREJECTED;
goto out;
}
p += root_hash.len;
if (memcmp(p, add_data.data, add_data.len) != 0) {
pr_debug("Additional data mismatch.\n");
err = -EKEYREJECTED;
goto out;
}
err = pkcs7_verify(pkcs7, VERIFYING_UNSPECIFIED_SIGNATURE);
if (err)
pr_debug("PKCS#7 signature verification error: %d\n", -err);
/*
* RSA signature verification sometimes returns unexpected error codes
* when the signature doesn't match.
*/
if (err == -ERANGE || err == -EINVAL)
err = -EBADMSG;
out:
pkcs7_free_message(pkcs7);
return err;
}
struct incfs_hash_alg *incfs_get_hash_alg(enum incfs_hash_tree_algorithm id)
{
static struct incfs_hash_alg sha256 = {
.name = "sha256",
.digest_size = SHA256_DIGEST_SIZE,
.id = INCFS_HASH_TREE_SHA256
};
struct incfs_hash_alg *result = NULL;
struct crypto_shash *shash;
if (id == INCFS_HASH_TREE_SHA256) {
BUILD_BUG_ON(INCFS_MAX_HASH_SIZE < SHA256_DIGEST_SIZE);
result = &sha256;
}
if (result == NULL)
return ERR_PTR(-ENOENT);
/* pairs with cmpxchg_release() below */
shash = smp_load_acquire(&result->shash);
if (shash)
return result;
shash = crypto_alloc_shash(result->name, 0, 0);
if (IS_ERR(shash)) {
int err = PTR_ERR(shash);
pr_err("Can't allocate hash alg %s, error code:%d",
result->name, err);
return ERR_PTR(err);
}
/* pairs with smp_load_acquire() above */
if (cmpxchg_release(&result->shash, NULL, shash) != NULL)
crypto_free_shash(shash);
return result;
}
struct mtree *incfs_alloc_mtree(enum incfs_hash_tree_algorithm id,
int data_block_count,
struct mem_range root_hash)
{
struct mtree *result = NULL;
struct incfs_hash_alg *hash_alg = NULL;
int hash_per_block;
int lvl;
int total_blocks = 0;
int blocks_in_level[INCFS_MAX_MTREE_LEVELS];
int blocks = data_block_count;
if (data_block_count <= 0)
return ERR_PTR(-EINVAL);
hash_alg = incfs_get_hash_alg(id);
if (IS_ERR(hash_alg))
return ERR_PTR(PTR_ERR(hash_alg));
if (root_hash.len < hash_alg->digest_size)
return ERR_PTR(-EINVAL);
result = kzalloc(sizeof(*result), GFP_NOFS);
if (!result)
return ERR_PTR(-ENOMEM);
result->alg = hash_alg;
hash_per_block = INCFS_DATA_FILE_BLOCK_SIZE / result->alg->digest_size;
/* Calculating tree geometry. */
/* First pass: calculate how many blocks in each tree level. */
for (lvl = 0; blocks > 1; lvl++) {
if (lvl >= INCFS_MAX_MTREE_LEVELS) {
pr_err("incfs: too much data in mtree");
goto err;
}
blocks = (blocks + hash_per_block - 1) / hash_per_block;
blocks_in_level[lvl] = blocks;
total_blocks += blocks;
}
result->depth = lvl;
result->hash_tree_area_size = total_blocks * INCFS_DATA_FILE_BLOCK_SIZE;
if (result->hash_tree_area_size > INCFS_MAX_HASH_AREA_SIZE)
goto err;
blocks = 0;
/* Second pass: calculate offset of each level. 0th level goes last. */
for (lvl = 0; lvl < result->depth; lvl++) {
u32 suboffset;
blocks += blocks_in_level[lvl];
suboffset = (total_blocks - blocks)
* INCFS_DATA_FILE_BLOCK_SIZE;
result->hash_level_suboffset[lvl] = suboffset;
}
/* Root hash is stored separately from the rest of the tree. */
memcpy(result->root_hash, root_hash.data, hash_alg->digest_size);
return result;
err:
kfree(result);
return ERR_PTR(-E2BIG);
}
void incfs_free_mtree(struct mtree *tree)
{
kfree(tree);
}
int incfs_calc_digest(struct incfs_hash_alg *alg, struct mem_range data,
struct mem_range digest)
{
SHASH_DESC_ON_STACK(desc, alg->shash);
if (!alg || !alg->shash || !data.data || !digest.data)
return -EFAULT;
if (alg->digest_size > digest.len)
return -EINVAL;
desc->tfm = alg->shash;
return crypto_shash_digest(desc, data.data, data.len, digest.data);
}
void incfs_free_signature_info(struct signature_info *si)
{
if (!si)
return;
kfree(si->root_hash.data);
kfree(si->additional_data.data);
kfree(si->signature.data);
kfree(si);
}
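
To make the two-pass geometry calculation in incfs_alloc_mtree() concrete, here is a worked example assuming INCFS_DATA_FILE_BLOCK_SIZE = 4096 and SHA-256 digests (32 bytes, i.e. 128 hashes per block) for a file with 1000 data blocks:

	level 0: (1000 + 127) / 128 = 8 hash blocks
	level 1: (8 + 127) / 128    = 1 hash block
	depth = 2, total_blocks = 9
	hash_tree_area_size = 9 * 4096 = 36864 bytes
	hash_level_suboffset = { 4096, 0 }  /* level 1 comes first, level 0 goes last */

The root hash itself is not placed in the hash area; as the code notes, it is stored separately in mtree->root_hash.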

72
fs/incfs/integrity.h Normal file
View File

@ -0,0 +1,72 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2019 Google LLC
*/
#ifndef _INCFS_INTEGRITY_H
#define _INCFS_INTEGRITY_H
#include <linux/types.h>
#include <linux/kernel.h>
#include <crypto/hash.h>
#include <uapi/linux/incrementalfs.h>
#include "internal.h"
#define INCFS_MAX_MTREE_LEVELS 8
#define INCFS_MAX_HASH_AREA_SIZE (1280 * 1024 * 1024)
struct incfs_hash_alg {
const char *name;
int digest_size;
enum incfs_hash_tree_algorithm id;
struct crypto_shash *shash;
};
/* Merkle tree structure. */
struct mtree {
struct incfs_hash_alg *alg;
u8 root_hash[INCFS_MAX_HASH_SIZE];
/* Offset of each hash level in the hash area. */
u32 hash_level_suboffset[INCFS_MAX_MTREE_LEVELS];
u32 hash_tree_area_size;
/* Number of levels in hash_level_suboffset */
int depth;
};
struct signature_info {
struct mem_range root_hash;
struct mem_range additional_data;
struct mem_range signature;
enum incfs_hash_tree_algorithm hash_alg;
};
struct incfs_hash_alg *incfs_get_hash_alg(enum incfs_hash_tree_algorithm id);
struct mtree *incfs_alloc_mtree(enum incfs_hash_tree_algorithm id,
int data_block_count,
struct mem_range root_hash);
void incfs_free_mtree(struct mtree *tree);
size_t incfs_get_mtree_depth(enum incfs_hash_tree_algorithm alg, loff_t size);
size_t incfs_get_mtree_hash_count(enum incfs_hash_tree_algorithm alg,
loff_t size);
int incfs_calc_digest(struct incfs_hash_alg *alg, struct mem_range data,
struct mem_range digest);
int incfs_validate_pkcs7_signature(struct mem_range pkcs7_blob,
struct mem_range root_hash, struct mem_range add_data);
void incfs_free_signature_info(struct signature_info *si);
#endif /* _INCFS_INTEGRITY_H */
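
A minimal sketch of how these helpers combine to hash one data block; the wrapper below is hypothetical and only illustrates the calling convention (range() comes from internal.h):

	/* Hypothetical wrapper: hash one block with SHA-256 using the helpers
	 * declared above. 'out' must be at least INCFS_MAX_HASH_SIZE bytes.
	 */
	static int hash_one_block(struct mem_range block, u8 *out)
	{
		struct incfs_hash_alg *alg = incfs_get_hash_alg(INCFS_HASH_TREE_SHA256);

		if (IS_ERR(alg))
			return PTR_ERR(alg);
		return incfs_calc_digest(alg, block, range(out, INCFS_MAX_HASH_SIZE));
	}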

21
fs/incfs/internal.h Normal file
View File

@ -0,0 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2018 Google LLC
*/
#ifndef _INCFS_INTERNAL_H
#define _INCFS_INTERNAL_H
#include <linux/types.h>
struct mem_range {
u8 *data;
size_t len;
};
static inline struct mem_range range(u8 *data, size_t len)
{
return (struct mem_range){ .data = data, .len = len };
}
#define LOCK_REQUIRED(lock) WARN_ON_ONCE(!mutex_is_locked(&lock))
#endif /* _INCFS_INTERNAL_H */

103
fs/incfs/main.c Normal file
View File

@ -0,0 +1,103 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2018 Google LLC
*/
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <uapi/linux/incrementalfs.h>
#include "vfs.h"
#define INCFS_NODE_FEATURES "features"
struct file_system_type incfs_fs_type = {
.owner = THIS_MODULE,
.name = INCFS_NAME,
.mount = incfs_mount_fs,
.kill_sb = incfs_kill_sb,
.fs_flags = 0
};
static struct kobject *sysfs_root, *featurefs_root;
static ssize_t corefs_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buff)
{
return snprintf(buff, PAGE_SIZE, "supported\n");
}
static struct kobj_attribute corefs_attr = __ATTR_RO(corefs);
static struct attribute *attributes[] = {
&corefs_attr.attr,
NULL,
};
static const struct attribute_group attr_group = {
.attrs = attributes,
};
static int __init init_sysfs(void)
{
int res = 0;
sysfs_root = kobject_create_and_add(INCFS_NAME, fs_kobj);
if (!sysfs_root)
return -ENOMEM;
featurefs_root = kobject_create_and_add(INCFS_NODE_FEATURES,
sysfs_root);
if (!featurefs_root)
return -ENOMEM;
res = sysfs_create_group(featurefs_root, &attr_group);
if (res) {
kobject_put(sysfs_root);
sysfs_root = NULL;
}
return res;
}
static void cleanup_sysfs(void)
{
if (featurefs_root) {
sysfs_remove_group(featurefs_root, &attr_group);
kobject_put(featurefs_root);
featurefs_root = NULL;
}
if (sysfs_root) {
kobject_put(sysfs_root);
sysfs_root = NULL;
}
}
static int __init init_incfs_module(void)
{
int err = 0;
err = init_sysfs();
if (err)
return err;
err = register_filesystem(&incfs_fs_type);
if (err)
cleanup_sysfs();
return err;
}
static void __exit cleanup_incfs_module(void)
{
cleanup_sysfs();
unregister_filesystem(&incfs_fs_type);
}
module_init(init_incfs_module);
module_exit(cleanup_incfs_module);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eugene Zemtsov <ezemtsov@google.com>");
MODULE_DESCRIPTION("Incremental File System");
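
Since main.c registers the feature nodes under /sys/fs/<INCFS_NAME>/features, userspace can probe for capabilities with a check like the sketch below (path derived from INCFS_NAME = "incremental-fs" and the corefs attribute above; illustrative only):

	#include <unistd.h>

	/* Illustrative userspace probe for the sysfs feature node created above. */
	static int incfs_supports_corefs(void)
	{
		return access("/sys/fs/incremental-fs/features/corefs", F_OK) == 0;
	}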

2181
fs/incfs/vfs.c Normal file

File diff suppressed because it is too large.

13
fs/incfs/vfs.h Normal file
View File

@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2018 Google LLC
*/
#ifndef _INCFS_VFS_H
#define _INCFS_VFS_H
void incfs_kill_sb(struct super_block *sb);
struct dentry *incfs_mount_fs(struct file_system_type *type, int flags,
const char *dev_name, void *data);
#endif

View File

@ -1122,7 +1122,8 @@ static int may_linkat(struct path *link)
* may_create_in_sticky - Check whether an O_CREAT open in a sticky directory
* should be allowed, or not, on files that already
* exist.
* @dir: the sticky parent directory
* @dir_mode: mode bits of directory
* @dir_uid: owner of directory
* @inode: the inode of the file to open
*
* Block an O_CREAT open of a FIFO (or a regular file) when:
@ -1138,18 +1139,18 @@ static int may_linkat(struct path *link)
*
* Returns 0 if the open is allowed, -ve on error.
*/
static int may_create_in_sticky(struct dentry * const dir,
static int may_create_in_sticky(umode_t dir_mode, kuid_t dir_uid,
struct inode * const inode)
{
if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) ||
(!sysctl_protected_regular && S_ISREG(inode->i_mode)) ||
likely(!(dir->d_inode->i_mode & S_ISVTX)) ||
uid_eq(inode->i_uid, dir->d_inode->i_uid) ||
likely(!(dir_mode & S_ISVTX)) ||
uid_eq(inode->i_uid, dir_uid) ||
uid_eq(current_fsuid(), inode->i_uid))
return 0;
if (likely(dir->d_inode->i_mode & 0002) ||
(dir->d_inode->i_mode & 0020 &&
if (likely(dir_mode & 0002) ||
(dir_mode & 0020 &&
((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) ||
(sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) {
return -EACCES;
@ -3384,6 +3385,8 @@ static int do_last(struct nameidata *nd,
int *opened)
{
struct dentry *dir = nd->path.dentry;
kuid_t dir_uid = dir->d_inode->i_uid;
umode_t dir_mode = dir->d_inode->i_mode;
int open_flag = op->open_flag;
bool will_truncate = (open_flag & O_TRUNC) != 0;
bool got_write = false;
@ -3519,7 +3522,7 @@ finish_open:
error = -EISDIR;
if (d_is_dir(nd->path.dentry))
goto out;
error = may_create_in_sticky(dir,
error = may_create_in_sticky(dir_mode, dir_uid,
d_backing_inode(nd->path.dentry));
if (unlikely(error))
goto out;

View File

@ -297,6 +297,7 @@ gen_headers_out_arm = [
"linux/in.h",
"linux/in6.h",
"linux/in_route.h",
"linux/incrementalfs.h",
"linux/inet_diag.h",
"linux/inotify.h",
"linux/input-event-codes.h",

View File

@ -293,6 +293,7 @@ gen_headers_out_arm64 = [
"linux/in.h",
"linux/in6.h",
"linux/in_route.h",
"linux/incrementalfs.h",
"linux/inet_diag.h",
"linux/inotify.h",
"linux/input-event-codes.h",

View File

@ -86,6 +86,14 @@
* contain all bit positions from 0 to 'bits' - 1.
*/
/*
* Allocation and deallocation of bitmap.
* Provided in lib/bitmap.c to avoid circular dependency.
*/
extern unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags);
extern unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags);
extern void bitmap_free(const unsigned long *bitmap);
/*
* lib/bitmap.c provides these functions:
*/
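
A minimal usage sketch for the allocators declared above, in kernel code:

	/* Illustrative: allocate a zeroed 1024-bit map, use it, free it. */
	static int bitmap_alloc_example(void)
	{
		unsigned long *map = bitmap_zalloc(1024, GFP_KERNEL);

		if (!map)
			return -ENOMEM;
		set_bit(100, map);	/* regular bitops work on the result */
		bitmap_free(map);
		return 0;
	}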

View File

@ -3351,6 +3351,7 @@ int dev_set_alias(struct net_device *, const char *, size_t);
int dev_change_net_namespace(struct net_device *, struct net *, const char *);
int __dev_set_mtu(struct net_device *, int);
int dev_set_mtu(struct net_device *, int);
int dev_validate_mtu(struct net_device *dev, int mtu);
void dev_set_group(struct net_device *, int);
int dev_set_mac_address(struct net_device *, struct sockaddr *);
int dev_change_carrier(struct net_device *, bool new_carrier);

View File

@ -445,13 +445,6 @@ ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr)
sizeof(*addr));
}
/* Calculate the bytes required to store the inclusive range of a-b */
static inline int
bitmap_bytes(u32 a, u32 b)
{
return 4 * ((((b - a + 8) / 8) + 3) / 4);
}
#include <linux/netfilter/ipset/ip_set_timeout.h>
#include <linux/netfilter/ipset/ip_set_comment.h>
#include <linux/netfilter/ipset/ip_set_counter.h>

View File

@ -66,7 +66,11 @@ TRACE_EVENT(xen_mc_callback,
TP_PROTO(xen_mc_callback_fn_t fn, void *data),
TP_ARGS(fn, data),
TP_STRUCT__entry(
__field(xen_mc_callback_fn_t, fn)
/*
* Use field_struct to avoid is_signed_type()
* comparison of a function pointer.
*/
__field_struct(xen_mc_callback_fn_t, fn)
__field(void *, data)
),
TP_fast_assign(

View File

@ -0,0 +1,244 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Userspace interface for Incremental FS.
*
* Incremental FS is a special-purpose Linux virtual file system that allows
* execution of a program while its binary and resource files are still being
* lazily downloaded over the network, USB, etc.
*
* Copyright 2019 Google LLC
*/
#ifndef _UAPI_LINUX_INCREMENTALFS_H
#define _UAPI_LINUX_INCREMENTALFS_H
#include <linux/limits.h>
#include <linux/ioctl.h>
#include <linux/types.h>
#include <linux/xattr.h>
/* ===== constants ===== */
#define INCFS_NAME "incremental-fs"
#define INCFS_MAGIC_NUMBER (0x5346434e49ul)
#define INCFS_DATA_FILE_BLOCK_SIZE 4096
#define INCFS_HEADER_VER 1
// TODO: This value is assumed in incfs_copy_signature_info_from_user to be the
// actual signature length. Set back to 64 when fixed.
#define INCFS_MAX_HASH_SIZE 32
#define INCFS_MAX_FILE_ATTR_SIZE 512
#define INCFS_PENDING_READS_FILENAME ".pending_reads"
#define INCFS_LOG_FILENAME ".log"
#define INCFS_XATTR_ID_NAME (XATTR_USER_PREFIX "incfs.id")
#define INCFS_XATTR_SIZE_NAME (XATTR_USER_PREFIX "incfs.size")
#define INCFS_XATTR_METADATA_NAME (XATTR_USER_PREFIX "incfs.metadata")
#define INCFS_MAX_SIGNATURE_SIZE 8096
#define INCFS_IOCTL_BASE_CODE 'g'
/* ===== ioctl requests on the command dir ===== */
/* Create a new file */
#define INCFS_IOC_CREATE_FILE \
_IOWR(INCFS_IOCTL_BASE_CODE, 30, struct incfs_new_file_args)
/* Read file signature */
#define INCFS_IOC_READ_FILE_SIGNATURE \
_IOWR(INCFS_IOCTL_BASE_CODE, 31, struct incfs_get_file_sig_args)
enum incfs_compression_alg {
COMPRESSION_NONE = 0,
COMPRESSION_LZ4 = 1
};
enum incfs_block_flags {
INCFS_BLOCK_FLAGS_NONE = 0,
INCFS_BLOCK_FLAGS_HASH = 1,
};
typedef struct {
__u8 bytes[16];
} incfs_uuid_t __attribute__((aligned (8)));
/*
* Description of a pending read. A pending read is a read call by
* a userspace program for which the filesystem does not yet have the data.
*/
struct incfs_pending_read_info {
/* Id of a file that is being read from. */
incfs_uuid_t file_id;
/* Timestamp of the read, in microseconds since system boot. */
__aligned_u64 timestamp_us;
/* Index of a file block that is being read. */
__u32 block_index;
/* A serial number of this pending read. */
__u32 serial_number;
};
/*
* A struct to be written into a control file to load a data or hash
* block to a data file.
*/
struct incfs_new_data_block {
/* Index of a data block. */
__u32 block_index;
/* Length of data */
__u32 data_len;
/*
* A pointer to an actual data for the block.
*
* Equivalent to: __u8 *data;
*/
__aligned_u64 data;
/*
* Compression algorithm used to compress the data block.
* Values from enum incfs_compression_alg.
*/
__u8 compression;
/* Values from enum incfs_block_flags */
__u8 flags;
__u16 reserved1;
__u32 reserved2;
__aligned_u64 reserved3;
};
enum incfs_hash_tree_algorithm {
INCFS_HASH_TREE_NONE = 0,
INCFS_HASH_TREE_SHA256 = 1
};
struct incfs_file_signature_info {
/*
* A pointer to the file's root hash (0 if not present).
* The actual hash size is determined by hash_tree_alg.
* The buffer should be at least INCFS_MAX_HASH_SIZE bytes.
*
* Equivalent to: u8 *root_hash;
*/
__aligned_u64 root_hash;
/*
* A pointer to additional data that was attached to the root hash
* before signing.
*
* Equivalent to: u8 *additional_data;
*/
__aligned_u64 additional_data;
/* Size of additional data. */
__u32 additional_data_size;
__u32 reserved1;
/*
* A pointer to pkcs7 signature DER blob.
*
* Equivalent to: u8 *signature;
*/
__aligned_u64 signature;
/* Size of pkcs7 signature DER blob */
__u32 signature_size;
__u32 reserved2;
/* Value from incfs_hash_tree_algorithm */
__u8 hash_tree_alg;
};
/*
* Create a new file or directory.
*/
struct incfs_new_file_args {
/* Id of a file to create. */
incfs_uuid_t file_id;
/*
* Total size of the new file. Ignored if S_ISDIR(mode).
*/
__aligned_u64 size;
/*
* File mode. Permissions and dir flag.
*/
__u16 mode;
__u16 reserved1;
__u32 reserved2;
/*
* A pointer to a null-terminated relative path to the file's parent
* dir.
* Max length: PATH_MAX
*
* Equivalent to: char *directory_path;
*/
__aligned_u64 directory_path;
/*
* A pointer to the null-terminated file name.
* Max length: PATH_MAX
*
* Equivalent to: char *file_name;
*/
__aligned_u64 file_name;
/*
* A pointer to a file attribute to be set on creation.
*
* Equivalent to: u8 *file_attr;
*/
__aligned_u64 file_attr;
/*
* Length of the data buffer specified by file_attr.
* Max value: INCFS_MAX_FILE_ATTR_SIZE
*/
__u32 file_attr_len;
__u32 reserved4;
/* struct incfs_file_signature_info *signature_info; */
__aligned_u64 signature_info;
__aligned_u64 reserved5;
__aligned_u64 reserved6;
};
/*
* Request a digital signature blob for a given file.
* Argument for INCFS_IOC_READ_FILE_SIGNATURE ioctl
*/
struct incfs_get_file_sig_args {
/*
* A pointer to the data buffer to save a signature blob to.
*
* Equivalent to: u8 *file_signature;
*/
__aligned_u64 file_signature;
/* Size of the buffer at file_signature. */
__u32 file_signature_buf_size;
/*
* Number of bytes written to the file_signature buffer.
* Set when the ioctl completes.
*/
__u32 file_signature_len_out;
};
#endif /* _UAPI_LINUX_INCREMENTALFS_H */
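
As a usage illustration of the block-loading structure above (buffer and helper names are hypothetical), an uncompressed 4 KiB block could be described like this before being written to the control file:

	#include <stdint.h>
	#include <linux/incrementalfs.h>

	/* Hypothetical: describe one uncompressed 4 KiB data block for loading.
	 * 'buf' points to INCFS_DATA_FILE_BLOCK_SIZE bytes of data.
	 */
	static struct incfs_new_data_block make_block(const void *buf, __u32 index)
	{
		struct incfs_new_data_block block = {
			.block_index = index,
			.data_len = INCFS_DATA_FILE_BLOCK_SIZE,
			.data = (__u64)(uintptr_t)buf,
			.compression = COMPRESSION_NONE,
			.flags = INCFS_BLOCK_FLAGS_NONE,
		};
		return block;
	}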

View File

@ -13,6 +13,7 @@
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
@ -1212,3 +1213,22 @@ void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int n
}
EXPORT_SYMBOL(bitmap_copy_le);
#endif
unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags)
{
return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long),
flags);
}
EXPORT_SYMBOL(bitmap_alloc);
unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags)
{
return bitmap_alloc(nbits, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(bitmap_zalloc);
void bitmap_free(const unsigned long *bitmap)
{
kfree(bitmap);
}
EXPORT_SYMBOL(bitmap_free);

View File

@ -6960,18 +6960,9 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
if (new_mtu == dev->mtu)
return 0;
/* MTU must be positive, and in range */
if (new_mtu < 0 || new_mtu < dev->min_mtu) {
net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
dev->name, new_mtu, dev->min_mtu);
return -EINVAL;
}
if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
dev->name, new_mtu, dev->max_mtu);
return -EINVAL;
}
err = dev_validate_mtu(dev, new_mtu);
if (err)
return err;
if (!netif_device_present(dev))
return -ENODEV;
@ -7743,8 +7734,10 @@ int register_netdevice(struct net_device *dev)
goto err_uninit;
ret = netdev_register_kobject(dev);
if (ret)
if (ret) {
dev->reg_state = NETREG_UNREGISTERED;
goto err_uninit;
}
dev->reg_state = NETREG_REGISTERED;
__netdev_update_features(dev);
@ -7843,6 +7836,23 @@ int init_dummy_netdev(struct net_device *dev)
EXPORT_SYMBOL_GPL(init_dummy_netdev);
int dev_validate_mtu(struct net_device *dev, int new_mtu)
{
/* MTU must be positive, and in range */
if (new_mtu < 0 || new_mtu < dev->min_mtu) {
net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
dev->name, new_mtu, dev->min_mtu);
return -EINVAL;
}
if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
dev->name, new_mtu, dev->max_mtu);
return -EINVAL;
}
return 0;
}
/**
* register_netdev - register a network device
* @dev: device to register

View File

@ -911,25 +911,30 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
struct kobject *kobj = &queue->kobj;
int error = 0;
/* Kobject_put later will trigger rx_queue_release call which
* decreases dev refcount: Take that reference here
*/
dev_hold(queue->dev);
kobj->kset = dev->queues_kset;
error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
"rx-%u", index);
if (error)
return error;
dev_hold(queue->dev);
goto err;
if (dev->sysfs_rx_queue_group) {
error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
if (error) {
kobject_put(kobj);
return error;
}
if (error)
goto err;
}
kobject_uevent(kobj, KOBJ_ADD);
return error;
err:
kobject_put(kobj);
return error;
}
#endif /* CONFIG_SYSFS */
@ -1322,25 +1327,29 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
struct kobject *kobj = &queue->kobj;
int error = 0;
/* Kobject_put later will trigger netdev_queue_release call
* which decreases dev refcount: Take that reference here
*/
dev_hold(queue->dev);
kobj->kset = dev->queues_kset;
error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
"tx-%u", index);
if (error)
return error;
dev_hold(queue->dev);
goto err;
#ifdef CONFIG_BQL
error = sysfs_create_group(kobj, &dql_group);
if (error) {
kobject_put(kobj);
return error;
}
if (error)
goto err;
#endif
kobject_uevent(kobj, KOBJ_ADD);
return 0;
err:
kobject_put(kobj);
return error;
}
#endif /* CONFIG_SYSFS */

View File

@ -2466,8 +2466,17 @@ struct net_device *rtnl_create_link(struct net *net,
dev->rtnl_link_ops = ops;
dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
if (tb[IFLA_MTU])
dev->mtu = nla_get_u32(tb[IFLA_MTU]);
if (tb[IFLA_MTU]) {
u32 mtu = nla_get_u32(tb[IFLA_MTU]);
int err;
err = dev_validate_mtu(dev, mtu);
if (err) {
free_netdev(dev);
return ERR_PTR(err);
}
dev->mtu = mtu;
}
if (tb[IFLA_ADDRESS]) {
memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
nla_len(tb[IFLA_ADDRESS]));

View File

@ -1202,10 +1202,8 @@ int ip_tunnel_init(struct net_device *dev)
iph->version = 4;
iph->ihl = 5;
if (tunnel->collect_md) {
dev->features |= NETIF_F_NETNS_LOCAL;
if (tunnel->collect_md)
netif_keep_dst(dev);
}
return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);

View File

@ -678,8 +678,7 @@ static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
* bandwidth sample. Delivered is in packets and interval_us in uS and
* ratio will be <<1 for most connections. So delivered is first scaled.
*/
bw = (u64)rs->delivered * BW_UNIT;
do_div(bw, rs->interval_us);
bw = div64_long((u64)rs->delivered * BW_UNIT, rs->interval_us);
/* If this sample is application-limited, it is likely to have a very
* low delivered count that represents application behavior rather than

View File

@ -1878,10 +1878,8 @@ static int ip6_tnl_dev_init(struct net_device *dev)
if (err)
return err;
ip6_tnl_link_config(t);
if (t->parms.collect_md) {
dev->features |= NETIF_F_NETNS_LOCAL;
if (t->parms.collect_md)
netif_keep_dst(dev);
}
return 0;
}

View File

@ -27,6 +27,7 @@
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/dst_cache.h>
#include <net/ip_tunnels.h>
#ifdef CONFIG_IPV6_SEG6_HMAC
#include <net/seg6_hmac.h>
#endif
@ -126,7 +127,8 @@ static bool decap_and_validate(struct sk_buff *skb, int proto)
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
skb->encapsulation = 0;
if (iptunnel_pull_offloads(skb))
return false;
return true;
}

View File

@ -79,7 +79,7 @@ mtype_flush(struct ip_set *set)
if (set->extensions & IPSET_EXT_DESTROY)
mtype_ext_cleanup(set);
memset(map->members, 0, map->memsize);
bitmap_zero(map->members, map->elements);
set->elements = 0;
set->ext_size = 0;
}

View File

@ -40,7 +40,7 @@ MODULE_ALIAS("ip_set_bitmap:ip");
/* Type structure */
struct bitmap_ip {
void *members; /* the set members */
unsigned long *members; /* the set members */
u32 first_ip; /* host byte order, included in range */
u32 last_ip; /* host byte order, included in range */
u32 elements; /* number of max elements in the set */
@ -222,7 +222,7 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
u32 first_ip, u32 last_ip,
u32 elements, u32 hosts, u8 netmask)
{
map->members = ip_set_alloc(map->memsize);
map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
if (!map->members)
return false;
map->first_ip = first_ip;
@ -315,7 +315,7 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
if (!map)
return -ENOMEM;
map->memsize = bitmap_bytes(0, elements - 1);
map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
set->variant = &bitmap_ip;
if (!init_map_ip(set, map, first_ip, last_ip,
elements, hosts, netmask)) {

View File

@ -46,7 +46,7 @@ enum {
/* Type structure */
struct bitmap_ipmac {
void *members; /* the set members */
unsigned long *members; /* the set members */
u32 first_ip; /* host byte order, included in range */
u32 last_ip; /* host byte order, included in range */
u32 elements; /* number of max elements in the set */
@ -299,7 +299,7 @@ static bool
init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
u32 first_ip, u32 last_ip, u32 elements)
{
map->members = ip_set_alloc(map->memsize);
map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
if (!map->members)
return false;
map->first_ip = first_ip;
@ -363,7 +363,7 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
if (!map)
return -ENOMEM;
map->memsize = bitmap_bytes(0, elements - 1);
map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
set->variant = &bitmap_ipmac;
if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
kfree(map);

View File

@ -34,7 +34,7 @@ MODULE_ALIAS("ip_set_bitmap:port");
/* Type structure */
struct bitmap_port {
void *members; /* the set members */
unsigned long *members; /* the set members */
u16 first_port; /* host byte order, included in range */
u16 last_port; /* host byte order, included in range */
u32 elements; /* number of max elements in the set */
@ -207,7 +207,7 @@ static bool
init_map_port(struct ip_set *set, struct bitmap_port *map,
u16 first_port, u16 last_port)
{
map->members = ip_set_alloc(map->memsize);
map->members = bitmap_zalloc(map->elements, GFP_KERNEL | __GFP_NOWARN);
if (!map->members)
return false;
map->first_port = first_port;
@ -250,7 +250,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
return -ENOMEM;
map->elements = elements;
map->memsize = bitmap_bytes(0, map->elements);
map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
set->variant = &bitmap_port;
if (!init_map_port(set, map, first_port, last_port)) {
kfree(map);
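
The memsize arithmetic changes in these ipset bitmap types: the removed bitmap_bytes() helper rounded the byte count up to 4-byte granularity, while BITS_TO_LONGS() * sizeof(unsigned long) rounds up to long granularity, so the new size is always at least as large. A worked check for 1000 elements:

	old: bitmap_bytes(0, 999) = 4 * ((((999 - 0 + 8) / 8) + 3) / 4) = 4 * 32 = 128 bytes
	new: BITS_TO_LONGS(1000) * sizeof(unsigned long) = 16 * 8 = 128 bytes (on 64-bit)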

View File

@ -267,12 +267,12 @@ static int tcf_em_validate(struct tcf_proto *tp,
}
em->data = (unsigned long) v;
}
em->datalen = data_len;
}
}
em->matchid = em_hdr->matchid;
em->flags = em_hdr->flags;
em->datalen = data_len;
em->net = net;
err = 0;

View File

@ -764,6 +764,10 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
if (sk->sk_state == TCP_ESTABLISHED)
goto out;
rc = -EALREADY; /* Do nothing if call is already in progress */
if (sk->sk_state == TCP_SYN_SENT)
goto out;
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
@ -810,7 +814,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
/* Now the loop */
rc = -EINPROGRESS;
if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
goto out_put_neigh;
goto out;
rc = x25_wait_for_connection_establishment(sk);
if (rc)

View File

@ -53,6 +53,10 @@
#define R_AARCH64_ABS64 257
#endif
#define R_ARM_PC24 1
#define R_ARM_THM_CALL 10
#define R_ARM_CALL 28
static int fd_map; /* File descriptor for file being modified. */
static int mmap_failed; /* Boolean flag. */
static char gpfx; /* prefix for global symbol name (sometimes '_') */
@ -429,6 +433,18 @@ is_mcounted_section_name(char const *const txtname)
#define RECORD_MCOUNT_64
#include "recordmcount.h"
static int arm_is_fake_mcount(Elf32_Rel const *rp)
{
switch (ELF32_R_TYPE(w(rp->r_info))) {
case R_ARM_THM_CALL:
case R_ARM_CALL:
case R_ARM_PC24:
return 0;
}
return 1;
}
/* 64-bit EM_MIPS has weird ELF64_Rela.r_info.
* http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf
* We interpret Table 29 Relocation Operation (Elf64_Rel, Elf64_Rela) [p.40]
@ -530,6 +546,7 @@ do_file(char const *const fname)
altmcount = "__gnu_mcount_nc";
make_nop = make_nop_arm;
rel_type_nop = R_ARM_NONE;
is_fake_mcount32 = arm_is_fake_mcount;
break;
case EM_AARCH64:
reltype = R_AARCH64_ABS64;

View File

@ -0,0 +1 @@
incfs_test

View File

@ -0,0 +1,18 @@
# SPDX-License-Identifier: GPL-2.0
CFLAGS += -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -Wall -lssl -lcrypto -llz4
CFLAGS += -I../../../../../usr/include/
CFLAGS += -I../../../../include/uapi/
CFLAGS += -I../../../../lib
EXTRA_SOURCES := utils.c
CFLAGS += $(EXTRA_SOURCES)
TEST_GEN_PROGS := incfs_test
include ../../lib.mk
$(OUTPUT)incfs_test: incfs_test.c $(EXTRA_SOURCES)
all: $(OUTPUT)incfs_test
clean:
rm -rf $(OUTPUT)incfs_test *.o

View File

@ -0,0 +1 @@
CONFIG_INCREMENTAL_FS=y

File diff suppressed because it is too large.

View File

@ -0,0 +1,377 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2018 Google LLC
*/
#include <stdio.h>
#include <fcntl.h>
#include <dirent.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <errno.h>
#include <string.h>
#include <poll.h>
#include <openssl/bio.h>
#include <openssl/err.h>
#include <openssl/pem.h>
#include <openssl/pkcs7.h>
#include <openssl/sha.h>
#include <openssl/md5.h>
#include "utils.h"
int mount_fs(char *mount_dir, char *backing_dir, int read_timeout_ms)
{
static const char fs_name[] = INCFS_NAME;
char mount_options[512];
int result;
snprintf(mount_options, ARRAY_SIZE(mount_options),
"read_timeout_ms=%u",
read_timeout_ms);
result = mount(backing_dir, mount_dir, fs_name, 0, mount_options);
if (result != 0)
perror("Error mounting fs.");
return result;
}
int mount_fs_opt(char *mount_dir, char *backing_dir, char *opt)
{
static const char fs_name[] = INCFS_NAME;
int result;
result = mount(backing_dir, mount_dir, fs_name, 0, opt);
if (result != 0)
perror("Error mounting fs.");
return result;
}
int unlink_node(int fd, int parent_ino, char *filename)
{
return 0;
}
static EVP_PKEY *deserialize_private_key(const char *pem_key)
{
BIO *bio = NULL;
EVP_PKEY *pkey = NULL;
int len = strlen(pem_key);
bio = BIO_new_mem_buf(pem_key, len);
if (!bio)
return NULL;
pkey = PEM_read_bio_PrivateKey(bio, NULL, NULL, NULL);
BIO_free(bio);
return pkey;
}
static X509 *deserialize_cert(const char *pem_cert)
{
BIO *bio = NULL;
X509 *cert = NULL;
int len = strlen(pem_cert);
bio = BIO_new_mem_buf(pem_cert, len);
if (!bio)
return NULL;
cert = PEM_read_bio_X509(bio, NULL, NULL, NULL);
BIO_free(bio);
return cert;
}
bool sign_pkcs7(const void *data_to_sign, size_t data_size,
char *pkey_pem, char *cert_pem,
void **sig_ret, size_t *sig_size_ret)
{
/*
* PKCS#7 signing flags:
*
* - PKCS7_BINARY signing binary data, so skip MIME translation
*
* - PKCS7_NOATTR omit extra authenticated attributes, such as
* SMIMECapabilities
*
* - PKCS7_PARTIAL PKCS7_sign() creates a handle only, then
* PKCS7_sign_add_signer() can add a signer later.
* This is necessary to change the message digest
* algorithm from the default of SHA-1. Requires
* OpenSSL 1.0.0 or later.
*/
int pkcs7_flags = PKCS7_BINARY | PKCS7_NOATTR | PKCS7_PARTIAL;
void *sig;
size_t sig_size;
BIO *bio = NULL;
PKCS7 *p7 = NULL;
EVP_PKEY *pkey = NULL;
X509 *cert = NULL;
bool ok = false;
const EVP_MD *md = EVP_sha256();
pkey = deserialize_private_key(pkey_pem);
if (!pkey) {
printf("deserialize_private_key failed\n");
goto out;
}
cert = deserialize_cert(cert_pem);
if (!cert) {
printf("deserialize_cert failed\n");
goto out;
}
bio = BIO_new_mem_buf(data_to_sign, data_size);
if (!bio)
goto out;
p7 = PKCS7_sign(NULL, NULL, NULL, bio, pkcs7_flags);
if (!p7) {
printf("failed to initialize PKCS#7 signature object\n");
goto out;
}
if (!PKCS7_sign_add_signer(p7, cert, pkey, md, pkcs7_flags)) {
printf("failed to add signer to PKCS#7 signature object\n");
goto out;
}
if (PKCS7_final(p7, bio, pkcs7_flags) != 1) {
printf("failed to finalize PKCS#7 signature\n");
goto out;
}
BIO_free(bio);
bio = BIO_new(BIO_s_mem());
if (!bio) {
printf("out of memory\n");
goto out;
}
if (i2d_PKCS7_bio(bio, p7) != 1) {
printf("failed to DER-encode PKCS#7 signature object\n");
goto out;
}
sig_size = BIO_get_mem_data(bio, &sig);
*sig_ret = malloc(sig_size);
if (!*sig_ret) {
	printf("out of memory\n");
	goto out;
}
memcpy(*sig_ret, sig, sig_size);
*sig_size_ret = sig_size;
ok = true;
out:
PKCS7_free(p7);
BIO_free(bio);
return ok;
}
int crypto_emit_file(int fd, char *dir, char *filename, incfs_uuid_t *id_out,
size_t size, const char *root_hash, char *sig, size_t sig_size,
char *add_data)
{
int mode = __S_IFREG | 0555;
struct incfs_file_signature_info sig_info = {
.hash_tree_alg = root_hash
? INCFS_HASH_TREE_SHA256
: 0,
.root_hash = ptr_to_u64(root_hash),
.additional_data = ptr_to_u64(add_data),
.additional_data_size = strlen(add_data),
.signature = ptr_to_u64(sig),
.signature_size = sig_size,
};
struct incfs_new_file_args args = {
.size = size,
.mode = mode,
.file_name = ptr_to_u64(filename),
.directory_path = ptr_to_u64(dir),
.signature_info = ptr_to_u64(&sig_info),
.file_attr = 0,
.file_attr_len = 0
};
md5(filename, strlen(filename), (char *)args.file_id.bytes);
if (ioctl(fd, INCFS_IOC_CREATE_FILE, &args) != 0)
return -errno;
*id_out = args.file_id;
return 0;
}
int emit_file(int fd, char *dir, char *filename, incfs_uuid_t *id_out,
size_t size, char *attr)
{
int mode = __S_IFREG | 0555;
struct incfs_file_signature_info sig_info = {
.hash_tree_alg = 0,
.root_hash = ptr_to_u64(NULL)
};
struct incfs_new_file_args args = {
.size = size,
.mode = mode,
.file_name = ptr_to_u64(filename),
.directory_path = ptr_to_u64(dir),
.signature_info = ptr_to_u64(&sig_info),
.file_attr = ptr_to_u64(attr),
.file_attr_len = attr ? strlen(attr) : 0
};
md5(filename, strlen(filename), (char *)args.file_id.bytes);
if (ioctl(fd, INCFS_IOC_CREATE_FILE, &args) != 0)
return -errno;
*id_out = args.file_id;
return 0;
}
int get_file_bmap(int cmd_fd, int ino, unsigned char *buf, int buf_size)
{
return 0;
}
int get_file_signature(int fd, unsigned char *buf, int buf_size)
{
struct incfs_get_file_sig_args args = {
.file_signature = ptr_to_u64(buf),
.file_signature_buf_size = buf_size
};
if (ioctl(fd, INCFS_IOC_READ_FILE_SIGNATURE, &args) == 0)
return args.file_signature_len_out;
return -errno;
}
loff_t get_file_size(char *name)
{
struct stat st;
if (stat(name, &st) == 0)
return st.st_size;
return -ENOENT;
}
int open_commands_file(char *mount_dir)
{
char cmd_file[255];
int cmd_fd;
snprintf(cmd_file, ARRAY_SIZE(cmd_file),
"%s/%s", mount_dir, INCFS_PENDING_READS_FILENAME);
cmd_fd = open(cmd_file, O_RDONLY);
if (cmd_fd < 0)
perror("Can't open commands file");
return cmd_fd;
}
int open_log_file(char *mount_dir)
{
char cmd_file[255];
int cmd_fd;
snprintf(cmd_file, ARRAY_SIZE(cmd_file), "%s/.log", mount_dir);
cmd_fd = open(cmd_file, O_RDWR);
if (cmd_fd < 0)
perror("Can't open log file");
return cmd_fd;
}
int wait_for_pending_reads(int fd, int timeout_ms,
struct incfs_pending_read_info *prs, int prs_count)
{
ssize_t read_res = 0;
if (timeout_ms > 0) {
int poll_res = 0;
struct pollfd pollfd = {
.fd = fd,
.events = POLLIN
};
poll_res = poll(&pollfd, 1, timeout_ms);
if (poll_res < 0)
return -errno;
if (poll_res == 0)
return 0;
if (!(pollfd.revents & POLLIN))
return 0;
}
read_res = read(fd, prs, prs_count * sizeof(*prs));
if (read_res < 0)
return -errno;
return read_res / sizeof(*prs);
}
char *concat_file_name(const char *dir, char *file)
{
char full_name[FILENAME_MAX] = "";
if (snprintf(full_name, ARRAY_SIZE(full_name), "%s/%s", dir, file) < 0)
return NULL;
return strdup(full_name);
}
int delete_dir_tree(const char *dir_path)
{
DIR *dir = NULL;
struct dirent *dp;
int result = 0;
dir = opendir(dir_path);
if (!dir) {
result = -errno;
goto out;
}
while ((dp = readdir(dir))) {
char *full_path;
if (!strcmp(dp->d_name, ".") || !strcmp(dp->d_name, ".."))
continue;
full_path = concat_file_name(dir_path, dp->d_name);
if (dp->d_type == DT_DIR)
result = delete_dir_tree(full_path);
else
result = unlink(full_path);
free(full_path);
if (result)
goto out;
}
out:
if (dir)
closedir(dir);
if (!result)
rmdir(dir_path);
return result;
}
void sha256(char *data, size_t dsize, char *hash)
{
SHA256_CTX ctx;
SHA256_Init(&ctx);
SHA256_Update(&ctx, data, dsize);
SHA256_Final((unsigned char *)hash, &ctx);
}
void md5(char *data, size_t dsize, char *hash)
{
MD5_CTX ctx;
MD5_Init(&ctx);
MD5_Update(&ctx, data, dsize);
MD5_Final((unsigned char *)hash, &ctx);
}
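
For context, incfs_test.c (whose diff is suppressed above) drives these helpers roughly as in the sketch below; the flow is illustrative, error handling is trimmed, and it assumes the same includes as utils.c and that the mount and backing directories already exist:

	/* Illustrative test flow built on the helpers above. */
	static int example_flow(char *mount_dir, char *backing_dir)
	{
		char name[] = "example.txt";
		incfs_uuid_t id;
		int cmd_fd;

		if (mount_fs(mount_dir, backing_dir, 1000 /* read_timeout_ms */) != 0)
			return -1;

		cmd_fd = open_commands_file(mount_dir);
		if (cmd_fd < 0)
			return -1;

		/* Create an empty 4 KiB file in the root of the mount. */
		if (emit_file(cmd_fd, NULL, name, &id, 4096, NULL) != 0)
			return -1;

		close(cmd_fd);
		return umount(mount_dir);
	}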

View File

@ -0,0 +1,59 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2019 Google LLC
*/
#include <stdbool.h>
#include <sys/stat.h>
#include "../../include/uapi/linux/incrementalfs.h"
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
#ifdef __LP64__
#define ptr_to_u64(p) ((__u64)p)
#else
#define ptr_to_u64(p) ((__u64)(__u32)p)
#endif
#define SHA256_DIGEST_SIZE 32
int mount_fs(char *mount_dir, char *backing_dir, int read_timeout_ms);
int mount_fs_opt(char *mount_dir, char *backing_dir, char *opt);
int get_file_bmap(int cmd_fd, int ino, unsigned char *buf, int buf_size);
int get_file_signature(int fd, unsigned char *buf, int buf_size);
int emit_node(int fd, char *filename, int *ino_out, int parent_ino,
size_t size, mode_t mode, char *attr);
int emit_file(int fd, char *dir, char *filename, incfs_uuid_t *id_out,
size_t size, char *attr);
int crypto_emit_file(int fd, char *dir, char *filename, incfs_uuid_t *id_out,
size_t size, const char *root_hash, char *sig, size_t sig_size,
char *add_data);
int unlink_node(int fd, int parent_ino, char *filename);
loff_t get_file_size(char *name);
int open_commands_file(char *mount_dir);
int open_log_file(char *mount_dir);
int wait_for_pending_reads(int fd, int timeout_ms,
struct incfs_pending_read_info *prs, int prs_count);
char *concat_file_name(const char *dir, char *file);
void sha256(char *data, size_t dsize, char *hash);
void md5(char *data, size_t dsize, char *hash);
bool sign_pkcs7(const void *data_to_sign, size_t data_size,
char *pkey_pem, char *cert_pem,
void **sig_ret, size_t *sig_size_ret);
int delete_dir_tree(const char *path);