From d28764fd7b0372e401833397ec8d4e3f5821245b Mon Sep 17 00:00:00 2001 From: Christoph Strobl Date: Fri, 7 Feb 2025 14:19:01 +0100 Subject: [PATCH 01/13] Prepare issue branch --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index fd73e23ebd..1f6369e3b6 100644 --- a/pom.xml +++ b/pom.xml @@ -5,7 +5,7 @@ org.springframework.data spring-data-redis - 3.5.0-SNAPSHOT + 3.5.x-GH-3054-SNAPSHOT Spring Data Redis Spring Data module for Redis From c13c38e136657ac72abff6518a7e8c1f15195ff3 Mon Sep 17 00:00:00 2001 From: Tihomir Mateev Date: Mon, 25 Nov 2024 13:58:32 +0200 Subject: [PATCH 02/13] Introduce Hash Field Expiration to the Spring Data Redis framework Signed-off-by: Tihomir Mateev --- Makefile | 2 +- .../DefaultStringRedisConnection.java | 70 +++ .../connection/DefaultedRedisConnection.java | 50 ++ .../connection/ReactiveHashCommands.java | 511 +++++++++++++++++- .../redis/connection/RedisHashCommands.java | 109 ++++ .../connection/StringRedisConnection.java | 107 ++++ .../jedis/JedisClusterHashCommands.java | 88 +++ .../connection/jedis/JedisHashCommands.java | 38 ++ .../lettuce/LettuceHashCommands.java | 37 ++ .../lettuce/LettuceReactiveHashCommands.java | 84 +++ .../data/redis/core/BoundHashOperations.java | 76 +++ .../redis/core/DefaultHashOperations.java | 45 ++ .../data/redis/core/HashOperations.java | 80 +++ .../support/collections/DefaultRedisMap.java | 30 + .../redis/support/collections/RedisMap.java | 73 +++ .../support/collections/RedisProperties.java | 43 +- .../AbstractConnectionIntegrationTests.java | 189 +++++++ .../jedis/JedisClusterConnectionTests.java | 144 +++++ .../LettuceClusterConnectionTests.java | 143 +++++ ...eReactiveHashCommandsIntegrationTests.java | 60 ++ ...DefaultHashOperationsIntegrationTests.java | 80 +++ .../AbstractRedisMapIntegrationTests.java | 33 ++ 22 files changed, 2052 insertions(+), 40 deletions(-) diff --git a/Makefile b/Makefile index d2051060c1..1f6dee240f 100644 --- a/Makefile 
+++ b/Makefile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -VERSION?=7.2.5 +VERSION?=7.4.0 PROJECT?=redis GH_ORG?=redis SPRING_PROFILE?=ci diff --git a/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java b/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java index 8fe2f2c9f7..cb815eb412 100644 --- a/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java +++ b/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java @@ -2566,6 +2566,76 @@ public Long hStrLen(byte[] key, byte[] field) { return convertAndReturn(delegate.hStrLen(key, field), Converters.identityConverter()); } + @Override + public List hExpire(byte[] key, long seconds, byte[]... fields) { + return this.delegate.hExpire(key, seconds, fields); + } + + @Override + public List hpExpire(byte[] key, long millis, byte[]... fields) { + return this.delegate.hpExpire(key, millis, fields); + } + + @Override + public List hExpireAt(byte[] key, long unixTime, byte[]... fields) { + return this.delegate.hExpireAt(key, unixTime, fields); + } + + @Override + public List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... fields) { + return this.delegate.hpExpireAt(key, unixTimeInMillis, fields); + } + + @Override + public List hPersist(byte[] key, byte[]... fields) { + return this.delegate.hPersist(key, fields); + } + + @Override + public List hTtl(byte[] key, byte[]... fields) { + return this.delegate.hTtl(key, fields); + } + + @Override + public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) { + return this.delegate.hTtl(key, timeUnit, fields); + } + + @Override + public List hExpire(String key, long seconds, String... fields) { + return hExpire(serialize(key), seconds, serializeMulti(fields)); + } + + @Override + public List hpExpire(String key, long millis, String... 
fields) { + return hpExpire(serialize(key), millis, serializeMulti(fields)); + } + + @Override + public List hExpireAt(String key, long unixTime, String... fields) { + return hExpireAt(serialize(key), unixTime, serializeMulti(fields)); + } + + @Override + public List hpExpireAt(String key, long unixTimeInMillis, String... fields) { + return hpExpireAt(serialize(key), unixTimeInMillis, serializeMulti(fields)); + } + + @Override + public List hPersist(String key, String... fields) { + return hPersist(serialize(key), serializeMulti(fields)); + } + + @Override + public List hTtl(String key, String... fields) { + return hTtl(serialize(key), serializeMulti(fields)); + } + + @Override + public List hTtl(String key, TimeUnit timeUnit, String... fields) { + return hTtl(serialize(key), timeUnit, serializeMulti(fields)); + } + @Override public void setClientName(byte[] name) { this.delegate.setClientName(name); diff --git a/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java b/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java index aa5f6de773..ddbcc68d2c 100644 --- a/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java +++ b/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java @@ -65,6 +65,7 @@ * @author ihaohong * @author Dennis Neufeld * @author Shyngys Sapraliyev + * @author Tihomir Mateev * @since 2.0 */ @Deprecated @@ -1470,6 +1471,55 @@ default Long hStrLen(byte[] key, byte[] field) { return hashCommands().hStrLen(key, field); } + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hExpire(byte[] key, long seconds, byte[]... fields) { + return hashCommands().hExpire(key, seconds, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hpExpire(byte[] key, long millis, byte[]... 
fields) { + return hashCommands().hpExpire(key, millis, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hExpireAt(byte[] key, long unixTime, byte[]... fields) { + return hashCommands().hExpireAt(key, unixTime, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... fields) { + return hashCommands().hpExpireAt(key, unixTimeInMillis, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hPersist(byte[] key, byte[]... fields) { + return hashCommands().hPersist(key, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hTtl(byte[] key, byte[]... fields) { + return hashCommands().hTtl(key, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) { + return hashCommands().hTtl(key, timeUnit, fields); + } + // GEO COMMANDS /** @deprecated in favor of {@link RedisConnection#geoCommands()}}. 
*/ diff --git a/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java b/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java index 0fae8d30b8..35e3437141 100644 --- a/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java @@ -19,6 +19,8 @@ import reactor.core.publisher.Mono; import java.nio.ByteBuffer; +import java.time.Duration; +import java.time.Instant; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -44,10 +46,34 @@ * * @author Christoph Strobl * @author Mark Paluch + * @author Tihomir Mateev * @since 2.0 */ public interface ReactiveHashCommands { + /** + * {@link Command} for hash-bound operations. + * + * @author Christoph Strobl + * @author Tihomir Mateev + */ + class HashFieldsCommand extends KeyCommand { + + private final List fields; + + private HashFieldsCommand(@Nullable ByteBuffer key, List fields) { + super(key); + this.fields = fields; + } + + /** + * @return never {@literal null}. + */ + public List getFields() { + return fields; + } + } + /** * {@literal HSET} {@link Command}. * @@ -216,15 +242,10 @@ default Mono hMSet(ByteBuffer key, Map fieldVal * @author Christoph Strobl * @see Redis Documentation: HGET */ - class HGetCommand extends KeyCommand { - - private List fields; + class HGetCommand extends HashFieldsCommand { private HGetCommand(@Nullable ByteBuffer key, List fields) { - - super(key); - - this.fields = fields; + super(key, fields); } /** @@ -263,14 +284,7 @@ public HGetCommand from(ByteBuffer key) { Assert.notNull(key, "Key must not be null"); - return new HGetCommand(key, fields); - } - - /** - * @return never {@literal null}. 
- */ - public List getFields() { - return fields; + return new HGetCommand(key, getFields()); } } @@ -394,15 +408,10 @@ default Mono hExists(ByteBuffer key, ByteBuffer field) { * @author Christoph Strobl * @see Redis Documentation: HDEL */ - class HDelCommand extends KeyCommand { - - private final List fields; + class HDelCommand extends HashFieldsCommand { private HDelCommand(@Nullable ByteBuffer key, List fields) { - - super(key); - - this.fields = fields; + super(key, fields); } /** @@ -441,14 +450,7 @@ public HDelCommand from(ByteBuffer key) { Assert.notNull(key, "Key must not be null"); - return new HDelCommand(key, fields); - } - - /** - * @return never {@literal null}. - */ - public List getFields() { - return fields; + return new HDelCommand(key, getFields()); } } @@ -842,4 +844,453 @@ default Mono hStrLen(ByteBuffer key, ByteBuffer field) { * @since 2.1 */ Flux> hStrLen(Publisher commands); + + /** + * @author Tihomir Mateev + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + class Expire extends HashFieldsCommand { + + private final Duration ttl; + + /** + * Creates a new {@link Expire} given a {@code key}, a {@link List} of {@code fields} and a time-to-live + * + * @param key can be {@literal null}. + * @param fields must not be {@literal null}. + * @param ttl the duration of the time to live. + */ + private Expire(@Nullable ByteBuffer key, List fields, Duration ttl) { + + super(key, fields); + this.ttl = ttl; + } + + /** + * Specify the {@code fields} within the hash to set an expiration for. + * + * @param fields must not be {@literal null}. + * @return new instance of {@link Expire}. + */ + public static Expire expire(List fields, Duration ttl) { + + Assert.notNull(fields, "Field must not be null"); + return new Expire(null, fields, ttl); + } + + /** + * Define the {@code key} the hash is stored at. + * + * @param key must not be {@literal null}. + * @return new instance of {@link Expire}. 
+ */ + public Expire from(ByteBuffer key) { + return new Expire(key, getFields(), ttl); + } + + /** + * @return the ttl. + */ + public Duration getTtl() { + return ttl; + } + } + + /** + * Expire a given {@literal field} after a given {@link Duration} of time, measured in milliseconds, has passed. + * + * @param key must not be {@literal null}. + * @param field must not be {@literal null}. + * @param duration must not be {@literal null}. + * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + default Mono hExpire(ByteBuffer key, Duration duration, ByteBuffer field) { + Assert.notNull(duration, "Duration must not be null"); + + return hExpire(key, duration, Collections.singletonList(field)).singleOrEmpty(); + } + + /** + * Expire a {@link List} of {@literal field} after a given {@link Duration} of time, measured in milliseconds, has passed. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @param duration must not be {@literal null}. 
+ * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + default Flux hExpire(ByteBuffer key, Duration duration, List fields) { + Assert.notNull(duration, "Duration must not be null"); + + return hExpire(Flux.just(Expire.expire(fields, duration).from(key))) + .mapNotNull(NumericResponse::getOutput); + } + + /** + * Expire a {@link List} of {@literal field} after a given {@link Duration} of time, measured in milliseconds, has passed. + * + * @param commands must not be {@literal null}. + * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; + * @since 3.5 + * @see Redis Documentation: HEXPIRE + */ + Flux> hExpire(Publisher commands); + + /** + * Expire a given {@literal field} after a given {@link Duration} of time, measured in milliseconds, has passed. + * + * @param key must not be {@literal null}. + * @param field must not be {@literal null}. + * @param duration must not be {@literal null}. 
+ * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + default Mono hpExpire(ByteBuffer key, Duration duration, ByteBuffer field) { + Assert.notNull(duration, "Duration must not be null"); + + return hpExpire(key, duration, Collections.singletonList(field)).singleOrEmpty(); + } + + /** + * Expire a {@link List} of {@literal field} after a given {@link Duration} of time, measured in milliseconds, has passed. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @param duration must not be {@literal null}. + * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + default Flux hpExpire(ByteBuffer key, Duration duration, List fields) { + Assert.notNull(duration, "Duration must not be null"); + + return hpExpire(Flux.just(Expire.expire(fields, duration).from(key))) + .mapNotNull(NumericResponse::getOutput); + } + + /** + * Expire a {@link List} of {@literal field} after a given {@link Duration} of time, measured in milliseconds, has passed. + * + * @param commands must not be {@literal null}. 
+ * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; + * @since 3.5 + * @see Redis Documentation: HEXPIRE + */ + Flux> hpExpire(Publisher commands); + + /** + * @author Tihomir Mateev + * @see Redis Documentation: HEXPIREAT + * @since 3.5 + */ + class ExpireAt extends HashFieldsCommand { + + private final Instant expireAt; + + /** + * Creates a new {@link ExpireAt} given a {@code key}, a {@link List} of {@literal fields} and a {@link Instant} + * + * @param key can be {@literal null}. + * @param fields must not be {@literal null}. + * @param expireAt the {@link Instant} to expire at. + */ + private ExpireAt(@Nullable ByteBuffer key, List fields, Instant expireAt) { + + super(key, fields); + this.expireAt = expireAt; + } + + /** + * Specify the {@code fields} within the hash to set an expiration for. + * + * @param fields must not be {@literal null}. + * @return new instance of {@link ExpireAt}. + */ + public static ExpireAt expireAt(List fields, Instant expireAt) { + + Assert.notNull(fields, "Fields must not be null"); + return new ExpireAt(null, fields, expireAt); + } + + /** + * Define the {@code key} the hash is stored at. + * + * @param key must not be {@literal null}. + * @return new instance of {@link ExpireAt}. + */ + public ExpireAt from(ByteBuffer key) { + return new ExpireAt(key, getFields(), expireAt); + } + + /** + * @return the ttl. + */ + public Instant getExpireAt() { + return expireAt; + } + } + + /** + * Expire a given {@literal field} in a given {@link Instant} of time, indicated as an absolute + * Unix timestamp in seconds since Unix epoch + * + * @param key must not be {@literal null}. 
+ * @param field must not be {@literal null}. + * @param expireAt must not be {@literal null}. + * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not + * met); {@code -2} indicating there is no such field; + * @see Redis Documentation: HEXPIREAT + * @since 3.5 + */ + default Mono hExpireAt(ByteBuffer key, Instant expireAt, ByteBuffer field) { + + Assert.notNull(expireAt, "Duration must not be null"); + return hExpireAt(key, expireAt, Collections.singletonList(field)).singleOrEmpty(); + } + + /** + * Expire a {@link List} of {@literal field} in a given {@link Instant} of time, indicated as an absolute + * Unix timestamp in seconds since Unix epoch + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @param expireAt must not be {@literal null}. 
+ * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not + * met); {@code -2} indicating there is no such field; + * @see Redis Documentation: HEXPIREAT + * @since 3.5 + */ + default Flux hExpireAt(ByteBuffer key, Instant expireAt, List fields) { + Assert.notNull(expireAt, "Duration must not be null"); + + return hExpireAt(Flux.just(ExpireAt.expireAt(fields, expireAt).from(key))).mapNotNull(NumericResponse::getOutput); + } + + /** + * Expire a {@link List} of {@literal field} in a given {@link Instant} of time, indicated as an absolute + * Unix timestamp in seconds since Unix epoch + * + * @param commands must not be {@literal null}. + * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not + * met); {@code -2} indicating there is no such field; + * @since 3.5 + * @see Redis Documentation: HEXPIREAT + */ + Flux> hExpireAt(Publisher commands); + + /** + * Expire a given {@literal field} in a given {@link Instant} of time, indicated as an absolute + * Unix timestamp in milliseconds since Unix epoch + * + * @param key must not be {@literal null}. + * @param field must not be {@literal null}. + * @param expireAt must not be {@literal null}. 
+ * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not + * met); {@code -2} indicating there is no such field; + * @see Redis Documentation: HPEXPIREAT + * @since 3.5 + */ + default Mono hpExpireAt(ByteBuffer key, Instant expireAt, ByteBuffer field) { + + Assert.notNull(expireAt, "Duration must not be null"); + return hpExpireAt(key, expireAt, Collections.singletonList(field)).singleOrEmpty(); + } + + /** + * Expire a {@link List} of {@literal field} in a given {@link Instant} of time, indicated as an absolute + * Unix timestamp in milliseconds since Unix epoch + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @param expireAt must not be {@literal null}. + * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not + * met); {@code -2} indicating there is no such field; + * @see Redis Documentation: HPEXPIREAT + * @since 3.5 + */ + default Flux hpExpireAt(ByteBuffer key, Instant expireAt, List fields) { + Assert.notNull(expireAt, "Duration must not be null"); + + return hpExpireAt(Flux.just(ExpireAt.expireAt(fields, expireAt).from(key))).mapNotNull(NumericResponse::getOutput); + } + + /** + * Expire a {@link List} of {@literal field} in a given {@link Instant} of time, indicated as an absolute + * Unix timestamp in milliseconds since Unix epoch + * + * @param commands must not be {@literal null}. 
+ * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not + * met); {@code -2} indicating there is no such field; + * @since 3.5 + * @see Redis Documentation: HPEXPIREAT + */ + Flux> hpExpireAt(Publisher commands); + + /** + * Persist a given {@literal field} removing any associated expiration, measured as absolute + * Unix timestamp in seconds since Unix epoch + * + * @param key must not be {@literal null}. + * @param field must not be {@literal null}. + * @return a {@link Mono} emitting the persist result - {@code 1} indicating expiration time is removed; + * {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field; + * + * @see Redis Documentation: HPERSIST + * @since 3.5 + */ + default Mono hPersist(ByteBuffer key, ByteBuffer field) { + + return hPersist(key, Collections.singletonList(field)).singleOrEmpty(); + } + + /** + * Persist a given {@link List} of {@literal field} removing any associated expiration. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a {@link Flux} emitting the persisting results one by one - {@code 1} indicating expiration time is removed; + * {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field; + * + * @see Redis Documentation: HPERSIST + * @since 3.5 + */ + default Flux hPersist(ByteBuffer key, List fields) { + + return hPersist(Flux.just(new HashFieldsCommand(key, fields))).mapNotNull(NumericResponse::getOutput); + } + + /** + * Persist a given {@link List} of {@literal field} removing any associated expiration. + * + * @param commands must not be {@literal null}. 
+ * @return a {@link Flux} emitting the persisting results one by one - {@code 1} indicating expiration time is removed; + * {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field; + * * @since 3.5 + * @see Redis Documentation: HPERSIST + */ + Flux> hPersist(Publisher commands); + + /** + * Returns the time-to-live of a given {@literal field} in seconds. + * + * @param key must not be {@literal null}. + * @param field must not be {@literal null}. + * @return a {@link Mono} emitting the TTL result - the time to live in seconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; + * @see Redis Documentation: HTTL + * @since 3.5 + */ + default Mono hTtl(ByteBuffer key, ByteBuffer field) { + + return hTtl(key, Collections.singletonList(field)).singleOrEmpty(); + } + + /** + * Returns the time-to-live of all the given {@literal field} in the {@link List} in seconds. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a {@link Flux} emitting the TTL results one by one - the time to live in seconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; + * @see Redis Documentation: HTTL + * @since 3.5 + */ + default Flux hTtl(ByteBuffer key, List fields) { + + return hTtl(Flux.just(new HashFieldsCommand(key, fields))).mapNotNull(NumericResponse::getOutput); + } + + /** + * Returns the time-to-live of all the given {@literal field} in the {@link List} in seconds. + * + * @param commands must not be {@literal null}. + * @return a {@link Flux} emitting the persisting results one by one - the time to live in seconds; or a negative value + * to signal an error. 
The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; + * @since 3.5 + * @see Redis Documentation: HTTL + */ + Flux> hTtl(Publisher commands); + + + /** + * Returns the time-to-live of a given {@literal field} in milliseconds. + * + * @param key must not be {@literal null}. + * @param field must not be {@literal null}. + * @return a {@link Mono} emitting the TTL result - the time to live in milliseconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; + * @see Redis Documentation: HPTTL + * @since 3.5 + */ + default Mono hpTtl(ByteBuffer key, ByteBuffer field) { + + return hpTtl(key, Collections.singletonList(field)).singleOrEmpty(); + } + + /** + * Returns the time-to-live of all the given {@literal field} in the {@link List} in milliseconds. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a {@link Flux} emitting the TTL results one by one - the time to live in milliseconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; + * @see Redis Documentation: HPTTL + * @since 3.5 + */ + default Flux hpTtl(ByteBuffer key, List fields) { + + return hpTtl(Flux.just(new HashFieldsCommand(key, fields))).mapNotNull(NumericResponse::getOutput); + } + + /** + * Returns the time-to-live of all the given {@literal field} in the {@link List} in milliseconds. + * + * @param commands must not be {@literal null}. + * @return a {@link Flux} emitting the persisting results one by one - the time to live in milliseconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. 
+ * The command returns {@code -2} if the key does not exist; + * @since 3.5 + * @see Redis Documentation: HPTTL + */ + Flux> hpTtl(Publisher commands); } diff --git a/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java b/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java index 6385c56a57..066833d52d 100644 --- a/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java @@ -18,6 +18,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.springframework.data.redis.core.Cursor; import org.springframework.data.redis.core.ScanOptions; @@ -29,6 +30,7 @@ * @author Costin Leau * @author Christoph Strobl * @author Mark Paluch + * @author Tihomir Mateev */ public interface RedisHashCommands { @@ -249,4 +251,111 @@ public interface RedisHashCommands { */ @Nullable Long hStrLen(byte[] key, byte[] field); + + /** + * Set time to live for given {@code field} in seconds. + * + * @param key must not be {@literal null}. + * @param seconds the amount of time after which the key will be expired in seconds, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HEXPIRE + * @since 3.4 + */ + @Nullable + List hExpire(byte[] key, long seconds, byte[]... fields); + + /** + * Set time to live for given {@code field} in milliseconds. 
+ * + * @param key must not be {@literal null}. + * @param millis the amount of time after which the key will be expired in milliseconds, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPEXPIRE + * @since 3.4 + */ + @Nullable + List hpExpire(byte[] key, long millis, byte[]... fields); + + /** + * Set the expiration for given {@code field} as a {@literal UNIX} timestamp. + * + * @param key must not be {@literal null}. + * @param unixTime the moment in time in which the field expires, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not + * met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HEXPIREAT + * @since 3.4 + */ + @Nullable + List hExpireAt(byte[] key, long unixTime, byte[]... fields); + + /** + * Set the expiration for given {@code field} as a {@literal UNIX} timestamp in milliseconds. + * + * @param key must not be {@literal null}. + * @param unixTimeInMillis the moment in time in which the field expires in milliseconds, must not be {@literal null}. + * @param fields must not be {@literal null}. 
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not + * met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPEXPIREAT + * @since 3.4 + */ + @Nullable + List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... fields); + + /** + * Remove the expiration from given {@code field}. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is removed; + * {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field; + * {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPERSIST + * @since 3.4 + */ + @Nullable + List hPersist(byte[] key, byte[]... fields); + + /** + * Get the time to live for {@code field} in seconds. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.4 + */ + @Nullable + List hTtl(byte[] key, byte[]... fields); + + /** + * Get the time to live for {@code field} and convert it to the given {@link TimeUnit}. + * + * @param key must not be {@literal null}. 
+ * @param timeUnit must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return for each of the fields supplied - the time to live in the {@link TimeUnit} provided; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.4 + */ + @Nullable + List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields); } diff --git a/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java b/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java index 2c286ce97e..f95b618cfd 100644 --- a/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java +++ b/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java @@ -2333,6 +2333,113 @@ Long zRangeStoreRevByScore(String dstKey, String srcKey, @Nullable Long hStrLen(String key, String field); + /** + * Set time to live for given {@code field} in seconds. + * + * @param key must not be {@literal null}. + * @param seconds the amount of time after which the key will be expired in seconds, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HEXPIRE + * @since 3.4 + */ + @Nullable + List hExpire(String key, long seconds, String... 
fields); + + /** + * Set time to live for given {@code field} in milliseconds. + * + * @param key must not be {@literal null}. + * @param millis the amount of time after which the key will be expired in milliseconds, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPEXPIRE + * @since 3.4 + */ + @Nullable + List hpExpire(String key, long millis, String... fields); + + /** + * Set the expiration for given {@code field} as a {@literal UNIX} timestamp. + * + * @param key must not be {@literal null}. + * @param unixTime the moment in time in which the field expires, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not + * met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HEXPIREAT + * @since 3.4 + */ + @Nullable + List hExpireAt(String key, long unixTime, String... fields); + + /** + * Set the expiration for given {@code field} as a {@literal UNIX} timestamp in milliseconds. + * + * @param key must not be {@literal null}. 
+ * @param unixTimeInMillis the moment in time in which the field expires in milliseconds, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not + * met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPEXPIREAT + * @since 3.4 + */ + @Nullable + List hpExpireAt(String key, long unixTimeInMillis, String... fields); + + /** + * Remove the expiration from given {@code field}. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is removed; + * {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field; + * {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPERSIST + * @since 3.4 + */ + @Nullable + List hPersist(String key, String... fields); + + /** + * Get the time to live for {@code field} in seconds. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. 
+ * @see Redis Documentation: HTTL + * @since 3.4 + */ + @Nullable + List hTtl(String key, String... fields); + + /** + * Get the time to live for {@code field} and convert it to the given {@link TimeUnit}. + * + * @param key must not be {@literal null}. + * @param timeUnit must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in the {@link TimeUnit} provided; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.4 + */ + @Nullable + List hTtl(String key, TimeUnit timeUnit, String... fields); + // ------------------------------------------------------------------------- // Methods dealing with HyperLogLog // ------------------------------------------------------------------------- diff --git a/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java b/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java index 47ad6c6eec..c436afaeef 100644 --- a/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java @@ -23,6 +23,7 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.springframework.dao.DataAccessException; import org.springframework.data.redis.connection.RedisHashCommands; @@ -39,6 +40,7 @@ * @author Christoph Strobl * @author Mark Paluch * @author John Blum + * @author Tihomir Mateev * @since 2.0 */ class JedisClusterHashCommands implements RedisHashCommands { @@ -287,6 +289,92 @@ protected ScanIteration> doScan(CursorId cursorId, ScanOpt 
}.open(); } + @Override + public List hExpire(byte[] key, long seconds, byte[]... fields) { + Assert.notNull(key, "Key must not be null"); + Assert.notNull(fields, "Fields must not be null"); + + try { + return connection.getCluster().hexpire(key, seconds, fields); + } catch (Exception ex) { + throw convertJedisAccessException(ex); + } + } + + @Override + public List hpExpire(byte[] key, long millis, byte[]... fields) { + Assert.notNull(key, "Key must not be null"); + Assert.notNull(fields, "Fields must not be null"); + + try { + return connection.getCluster().hpexpire(key, millis, fields); + } catch (Exception ex) { + throw convertJedisAccessException(ex); + } + } + + @Override + public List hExpireAt(byte[] key, long unixTime, byte[]... fields) { + Assert.notNull(key, "Key must not be null"); + Assert.notNull(fields, "Fields must not be null"); + + try { + return connection.getCluster().hexpireAt(key, unixTime, fields); + } catch (Exception ex) { + throw convertJedisAccessException(ex); + } + } + + @Override + public List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... fields) { + Assert.notNull(key, "Key must not be null"); + Assert.notNull(fields, "Fields must not be null"); + + try { + return connection.getCluster().hpexpireAt(key, unixTimeInMillis, fields); + } catch (Exception ex) { + throw convertJedisAccessException(ex); + } + } + + @Override + public List hPersist(byte[] key, byte[]... fields) { + Assert.notNull(key, "Key must not be null"); + Assert.notNull(fields, "Fields must not be null"); + + try { + return connection.getCluster().hpersist(key, fields); + } catch (Exception ex) { + throw convertJedisAccessException(ex); + } + } + + @Override + public List hTtl(byte[] key, byte[]... 
fields) { + Assert.notNull(key, "Key must not be null"); + Assert.notNull(fields, "Fields must not be null"); + + try { + return connection.getCluster().httl(key, fields); + } catch (Exception ex) { + throw convertJedisAccessException(ex); + } + } + + @Override + public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) { + Assert.notNull(key, "Key must not be null"); + Assert.notNull(fields, "Fields must not be null"); + + try { + return connection.getCluster().httl(key, fields).stream() + .map(it -> it != null ? timeUnit.convert(it, TimeUnit.SECONDS) : null) + .toList(); + } catch (Exception ex) { + throw convertJedisAccessException(ex); + } + } + @Nullable @Override public Long hStrLen(byte[] key, byte[] field) { diff --git a/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java b/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java index a0ac8debf2..887412bb09 100644 --- a/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java @@ -25,6 +25,7 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.springframework.dao.InvalidDataAccessApiUsageException; import org.springframework.data.redis.connection.RedisHashCommands; @@ -43,6 +44,7 @@ * @author Christoph Strobl * @author Mark Paluch * @author John Blum + * @author Tihomir Mateev * @since 2.0 */ class JedisHashCommands implements RedisHashCommands { @@ -250,6 +252,42 @@ protected void doClose() { }.open(); } + @Override + public List hExpire(byte[] key, long seconds, byte[]... fields) { + return connection.invoke().just(Jedis::hexpire, PipelineBinaryCommands::hexpire, key, seconds, fields); + } + + @Override + public List hpExpire(byte[] key, long millis, byte[]... 
fields) { + return connection.invoke().just(Jedis::hpexpire, PipelineBinaryCommands::hpexpire, key, millis, fields); + } + + @Override + public List hExpireAt(byte[] key, long unixTime, byte[]... fields) { + return connection.invoke().just(Jedis::hexpireAt, PipelineBinaryCommands::hexpireAt, key, unixTime, fields); + } + + @Override + public List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... fields) { + return connection.invoke().just(Jedis::hpexpireAt, PipelineBinaryCommands::hpexpireAt, key, unixTimeInMillis, fields); + } + + @Override + public List hPersist(byte[] key, byte[]... fields) { + return connection.invoke().just(Jedis::hpersist, PipelineBinaryCommands::hpersist, key, fields); + } + + @Override + public List hTtl(byte[] key, byte[]... fields) { + return connection.invoke().just(Jedis::httl, PipelineBinaryCommands::httl, key, fields); + } + + @Override + public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) { + return connection.invoke().fromMany(Jedis::httl, PipelineBinaryCommands::httl, key, fields) + .toList(Converters.secondsToTimeUnit(timeUnit)); + } + @Nullable @Override public Long hStrLen(byte[] key, byte[] field) { diff --git a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java index e4b53f4fb4..01e683daa4 100644 --- a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java @@ -24,6 +24,7 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.springframework.dao.InvalidDataAccessApiUsageException; import org.springframework.data.redis.connection.RedisHashCommands; @@ -39,6 +40,7 @@ /** * @author Christoph Strobl * @author Mark Paluch + * @author Tihomir Mateev * @since 2.0 */ class LettuceHashCommands 
implements RedisHashCommands { @@ -208,6 +210,41 @@ public Cursor> hScan(byte[] key, ScanOptions options) { return hScan(key, CursorId.initial(), options); } + @Override + public List hExpire(byte[] key, long seconds, byte[]... fields) { + return connection.invoke().fromMany(RedisHashAsyncCommands::hexpire, key, seconds, fields).toList(); + } + + @Override + public List hpExpire(byte[] key, long millis, byte[]... fields) { + return connection.invoke().fromMany(RedisHashAsyncCommands::hpexpire, key, millis, fields).toList(); + } + + @Override + public List hExpireAt(byte[] key, long unixTime, byte[]... fields) { + return connection.invoke().fromMany(RedisHashAsyncCommands::hexpireat, key, unixTime, fields).toList(); + } + + @Override + public List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... fields) { + return connection.invoke().fromMany(RedisHashAsyncCommands::hpexpireat, key, unixTimeInMillis, fields).toList(); + } + + @Override + public List hPersist(byte[] key, byte[]... fields) { + return connection.invoke().fromMany(RedisHashAsyncCommands::hpersist, key, fields).toList(); + } + + @Override + public List hTtl(byte[] key, byte[]... fields) { + return connection.invoke().fromMany(RedisHashAsyncCommands::httl, key, fields).toList(); + } + + @Override + public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... 
fields) { + return connection.invoke().fromMany(RedisHashAsyncCommands::httl, key, fields) + .toList(Converters.secondsToTimeUnit(timeUnit)); + } /** * @param key diff --git a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java index b704321ef5..33e9c162e1 100644 --- a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java @@ -264,6 +264,90 @@ public Flux> hStrLen(Publisher> hExpire(Publisher commands) { + return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { + + Assert.notNull(command.getKey(), "Key must not be null"); + Assert.notNull(command.getFields(), "Fields must not be null"); + + return cmd.hexpire(command.getKey(), command.getTtl().toSeconds(), command.getFields().toArray(ByteBuffer[]::new)) + .map(value -> new NumericResponse<>(command, value)); + })); + } + + @Override + public Flux> hpExpire(Publisher commands) { + return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { + + Assert.notNull(command.getKey(), "Key must not be null"); + Assert.notNull(command.getFields(), "Fields must not be null"); + + return cmd.hpexpire(command.getKey(), command.getTtl().toMillis(), command.getFields().toArray(ByteBuffer[]::new)) + .map(value -> new NumericResponse<>(command, value)); + })); + } + + @Override + public Flux> hExpireAt(Publisher commands) { + return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { + + Assert.notNull(command.getKey(), "Key must not be null"); + Assert.notNull(command.getFields(), "Fields must not be null"); + + return cmd.hexpireat(command.getKey(), command.getExpireAt().getEpochSecond(), command.getFields().toArray(ByteBuffer[]::new)) + .map(value -> new NumericResponse<>(command, value)); + 
})); + } + + @Override + public Flux> hpExpireAt(Publisher commands) { + return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { + + Assert.notNull(command.getKey(), "Key must not be null"); + Assert.notNull(command.getFields(), "Fields must not be null"); + + return cmd.hpexpireat(command.getKey(), command.getExpireAt().toEpochMilli(), command.getFields().toArray(ByteBuffer[]::new)) + .map(value -> new NumericResponse<>(command, value)); + })); + } + + @Override + public Flux> hPersist(Publisher commands) { + return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { + + Assert.notNull(command.getKey(), "Key must not be null"); + Assert.notNull(command.getFields(), "Fields must not be null"); + + return cmd.hpersist(command.getKey(), command.getFields().toArray(ByteBuffer[]::new)) + .map(value -> new NumericResponse<>(command, value)); + })); + } + + @Override + public Flux> hTtl(Publisher commands) { + return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { + + Assert.notNull(command.getKey(), "Key must not be null"); + Assert.notNull(command.getFields(), "Fields must not be null"); + + return cmd.httl(command.getKey(), command.getFields().toArray(ByteBuffer[]::new)) + .map(value -> new NumericResponse<>(command, value)); + })); + } + + @Override + public Flux> hpTtl(Publisher commands) { + return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { + + Assert.notNull(command.getKey(), "Key must not be null"); + Assert.notNull(command.getFields(), "Fields must not be null"); + + return cmd.hpttl(command.getKey(), command.getFields().toArray(ByteBuffer[]::new)) + .map(value -> new NumericResponse<>(command, value)); + })); + } + private static Map.Entry toEntry(KeyValue kv) { return new Entry() { diff --git a/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java b/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java index f906462911..ff9e5b1300 
100644 --- a/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java @@ -15,10 +15,13 @@ */ package org.springframework.data.redis.core; +import java.time.Duration; +import java.time.Instant; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.springframework.lang.Nullable; @@ -29,6 +32,7 @@ * @author Christoph Strobl * @author Ninad Divadkar * @author Mark Paluch + * @author Tihomir Mateev */ public interface BoundHashOperations extends BoundKeyOperations { @@ -153,6 +157,78 @@ public interface BoundHashOperations extends BoundKeyOperations { @Nullable Long lengthOfValue(HK hashKey); + /** + * Set time to live for given {@code hashKey} (aka field). + * + * @param timeout the amount of time after which the key will be expired, must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); {@code -2} + * indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @throws IllegalArgumentException if the timeout is {@literal null}. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + List expire(Duration timeout, Collection hashKeys); + + /** + * Set the expiration for given {@code hashKey} (aka field) as a {@literal date} timestamp. + * + * @param expireAt must not be {@literal null}. + * @param hashKeys must not be {@literal null}. 
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @throws IllegalArgumentException if the instant is {@literal null} or too large to represent as a {@code Date}. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + List expireAt(Instant expireAt, Collection hashKeys); + + /** + * Remove the expiration from given {@code hashKey} (aka field). + * + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is removed; + * {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field; {@literal null} when + * used in pipeline / transaction. + * @see Redis Documentation: HPERSIST + * @since 3.5 + */ + @Nullable + List persist(Collection hashKeys); + + /** + * Get the time to live for {@code hashKey} (aka field) in seconds. + * + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. The command + * returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + List getExpire(Collection hashKeys); + + /** + * Get the time to live for {@code hashKey} (aka field) and convert it to the given {@link TimeUnit}. + * + * @param timeUnit must not be {@literal null}. 
+ * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in the {@link TimeUnit} provided; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. The command + * returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + List getExpire(TimeUnit timeUnit, Collection hashKeys); + /** * Get size of hash at the bound key. * diff --git a/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java b/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java index 974e20e13f..5df4422e48 100644 --- a/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java @@ -15,6 +15,8 @@ */ package org.springframework.data.redis.core; +import java.time.Duration; +import java.time.Instant; import java.util.Collection; import java.util.Collections; import java.util.LinkedHashMap; @@ -22,6 +24,7 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.springframework.core.convert.converter.Converter; import org.springframework.data.redis.connection.convert.Converters; @@ -34,6 +37,7 @@ * @author Costin Leau * @author Christoph Strobl * @author Ninad Divadkar + * @author Tihomir Mateev */ class DefaultHashOperations extends AbstractOperations implements HashOperations { @@ -210,6 +214,47 @@ public Boolean putIfAbsent(K key, HK hashKey, HV value) { return execute(connection -> connection.hSetNX(rawKey, rawHashKey, rawHashValue)); } + @Override + public List expire(K key, Duration duration, Collection hashKeys) { + byte[] rawKey = rawKey(key); + byte[][] rawHashKeys = rawHashKeys(hashKeys.toArray()); + long rawTimeout = duration.toMillis(); + 
return execute(connection -> connection.hpExpire(rawKey, rawTimeout, rawHashKeys)); + } + + @Override + public List expireAt(K key, Instant instant, Collection hashKeys) { + byte[] rawKey = rawKey(key); + byte[][] rawHashKeys = rawHashKeys(hashKeys.toArray()); + + return execute(connection -> connection.hpExpireAt(rawKey, instant.toEpochMilli(), rawHashKeys)); + } + + @Override + public List persist(K key, Collection hashKeys) { + byte[] rawKey = rawKey(key); + byte[][] rawHashKeys = rawHashKeys(hashKeys.toArray()); + + return execute(connection -> connection.hPersist(rawKey, rawHashKeys)); + } + + @Override + public List getExpire(K key, Collection hashKeys) { + byte[] rawKey = rawKey(key); + byte[][] rawHashKeys = rawHashKeys(hashKeys.toArray()); + + return execute(connection -> connection.hTtl(rawKey, rawHashKeys)); + } + + @Override + public List getExpire(K key, TimeUnit timeUnit, Collection hashKeys) { + byte[] rawKey = rawKey(key); + byte[][] rawHashKeys = rawHashKeys(hashKeys.toArray()); + + return execute(connection -> connection.hTtl(rawKey, timeUnit, rawHashKeys)); + } + @Override public List values(K key) { diff --git a/src/main/java/org/springframework/data/redis/core/HashOperations.java b/src/main/java/org/springframework/data/redis/core/HashOperations.java index 8a2c6641ad..ea17d26e2d 100644 --- a/src/main/java/org/springframework/data/redis/core/HashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/HashOperations.java @@ -15,10 +15,13 @@ */ package org.springframework.data.redis.core; +import java.time.Duration; +import java.time.Instant; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.springframework.lang.Nullable; @@ -28,6 +31,7 @@ * @author Costin Leau * @author Christoph Strobl * @author Ninad Divadkar + * @author Tihomir Mateev */ public interface HashOperations { @@ -221,6 +225,82 @@ public interface HashOperations { */ 
Cursor> scan(H key, ScanOptions options); + /** + * Set time to live for given {@code hashKey} (aka field). + * + * @param key must not be {@literal null}. + * @param timeout the amount of time after which the key will be expired, must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); {@code -2} + * indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @throws IllegalArgumentException if the timeout is {@literal null}. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + List expire(H key, Duration timeout, Collection hashKeys); + + /** + * Set the expiration for given {@code hashKey} (aka field) as a {@literal date} timestamp. + * + * @param key must not be {@literal null}. + * @param expireAt must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @throws IllegalArgumentException if the instant is {@literal null} or too large to represent as a {@code Date}. 
+ * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + List expireAt(H key, Instant expireAt, Collection hashKeys); + + /** + * Remove the expiration from given {@code hashKey} (aka field). + * + * @param key must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is removed; + * {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field; {@literal null} when + * used in pipeline / transaction. + * @see Redis Documentation: HPERSIST + * @since 3.5 + */ + @Nullable + List persist(H key, Collection hashKeys); + + /** + * Get the time to live for {@code hashKey} (aka field) in seconds. + * + * @param key must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. The command + * returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + List getExpire(H key, Collection hashKeys); + + /** + * Get the time to live for {@code hashKey} (aka field) and convert it to the given {@link TimeUnit}. + * + * @param key must not be {@literal null}. + * @param timeUnit must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in the {@link TimeUnit} provided; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. The command + * returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. 
+ * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + List getExpire(H key, TimeUnit timeUnit, Collection hashKeys); /** * @return never {@literal null}. */ diff --git a/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java b/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java index 9dd0274783..547c351875 100644 --- a/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java +++ b/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java @@ -15,10 +15,14 @@ */ package org.springframework.data.redis.support.collections; +import java.time.Duration; +import java.time.Instant; import java.util.Collection; import java.util.Collections; import java.util.Date; +import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -37,6 +41,7 @@ * @author Costin Leau * @author Christoph Strobl * @author Christian Bühler + * @author Tihomir Mateev */ public class DefaultRedisMap implements RedisMap { @@ -321,6 +326,31 @@ public Cursor> scan() { return scan(ScanOptions.NONE); } + @Override + public List expire(Duration timeout, Collection hashKeys) { + return Objects.requireNonNull(hashOps.expire(timeout, hashKeys)); + } + + @Override + public List expireAt(Instant expireAt, Collection hashKeys) { + return Objects.requireNonNull(hashOps.expireAt(expireAt, hashKeys)); + } + + @Override + public List persist(Collection hashKeys) { + return Objects.requireNonNull(hashOps.persist(hashKeys)); + } + + @Override + public List getExpire(Collection hashKeys) { + return Objects.requireNonNull(hashOps.getExpire(hashKeys)); + } + + @Override + public List getExpire(TimeUnit timeUnit, Collection hashKeys) { + return Objects.requireNonNull(hashOps.getExpire(timeUnit, hashKeys)); + } + private void checkResult(@Nullable Object obj) { if (obj == null) { throw new IllegalStateException("Cannot 
read collection with Redis connection in pipeline/multi-exec mode"); diff --git a/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java b/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java index a2eb3b8985..4b79cf0290 100644 --- a/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java +++ b/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java @@ -15,9 +15,14 @@ */ package org.springframework.data.redis.support.collections; +import java.time.Duration; +import java.time.Instant; +import java.util.Collection; import java.util.Iterator; +import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; import org.springframework.lang.Nullable; @@ -26,6 +31,7 @@ * * @author Costin Leau * @author Christoph Strobl + * @author Tihomi Mateev */ public interface RedisMap extends RedisStore, ConcurrentMap { @@ -71,4 +77,71 @@ public interface RedisMap extends RedisStore, ConcurrentMap { * @return */ Iterator> scan(); + + /** + * Set time to live for given {hash {@code key}. + * + * @param timeout the amount of time after which the key will be expired, must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); {@code -2} + * indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @throws IllegalArgumentException if the timeout is {@literal null}. 
+ * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + List expire(Duration timeout, Collection hashKeys); + + /** + * Set the expiration for given hash {@code key} as a {@literal date} timestamp. + * + * @param expireAt must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @throws IllegalArgumentException if the instant is {@literal null} or too large to represent as a {@code Date}. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + List expireAt(Instant expireAt, Collection hashKeys); + + /** + * Remove the expiration from given hash {@code key}. + * + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is removed; + * {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field; {@literal null} when + * used in pipeline / transaction. + * @see Redis Documentation: HPERSIST + * @since 3.5 + */ + List persist(Collection hashKeys); + + /** + * Get the time to live for hash {@code key} in seconds. + * + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. The command + * returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. 
+ * @see Redis Documentation: HTTL + * @since 3.5 + */ + List getExpire(Collection hashKeys); + + /** + * Get the time to live for hash {@code key} and convert it to the given {@link TimeUnit}. + * + * @param timeUnit must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. The command + * returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.5 + */ + List getExpire(TimeUnit timeUnit, Collection hashKeys); } diff --git a/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java b/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java index d02d86b6fc..68981643d9 100644 --- a/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java +++ b/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java @@ -17,16 +17,10 @@ import java.io.IOException; import java.io.OutputStream; -import java.util.Collection; -import java.util.Collections; -import java.util.Date; -import java.util.Enumeration; -import java.util.Iterator; -import java.util.LinkedHashSet; -import java.util.Map; +import java.time.Duration; +import java.time.Instant; +import java.util.*; import java.util.Map.Entry; -import java.util.Properties; -import java.util.Set; import java.util.concurrent.TimeUnit; import org.springframework.data.redis.connection.DataType; @@ -304,4 +298,35 @@ public synchronized void storeToXML(OutputStream os, String comment) throws IOEx public Iterator> scan() { throw new UnsupportedOperationException(); } + + @Override + public List expire(Duration timeout, Collection hashKeys) { + Collection keys = hashKeys.stream().map(key -> 
(String) key).toList(); + return Objects.requireNonNull(hashOps.expire(timeout, keys)); + } + + @Override + public List expireAt(Instant expireAt, Collection hashKeys) { + Collection keys = hashKeys.stream().map(key -> (String) key).toList(); + return Objects.requireNonNull(hashOps.expireAt(expireAt, keys)); + } + + @Override + public List persist(Collection hashKeys) { + Collection keys = hashKeys.stream().map(key -> (String) key).toList(); + return Objects.requireNonNull(hashOps.persist(keys)); + } + + @Override + public List getExpire(Collection hashKeys) { + Collection keys = hashKeys.stream().map(key -> (String) key).toList(); + return Objects.requireNonNull(hashOps.getExpire(keys)); + } + + @Override + public List getExpire(TimeUnit timeUnit, Collection hashKeys) { + Collection keys = hashKeys.stream().map(key -> (String) key).toList(); + return Objects.requireNonNull(hashOps.getExpire(timeUnit, keys)); + } + } diff --git a/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java b/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java index 4662437afe..d42153cb68 100644 --- a/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java @@ -113,6 +113,7 @@ * @author Hendrik Duerkop * @author Shyngys Sapraliyev * @author Roman Osadchuk + * @author Tihomir Mateev */ public abstract class AbstractConnectionIntegrationTests { @@ -3432,6 +3433,194 @@ void hStrLenReturnsZeroWhenKeyDoesNotExist() { verifyResults(Arrays.asList(new Object[] { 0L })); } + @Test + @EnabledOnCommand("HEXPIRE") + public void hExpireReturnsSuccessAndSetsTTL() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hExpire("hash-hexpire", 5L, "key-2")); + actual.add(connection.hTtl("hash-hexpire", "key-2")); + + List results = getResults(); 
+ assertThat(results.get(0)).isEqualTo(Boolean.TRUE); + assertThat((List) results.get(1)).contains(1L); + assertThat((List) results.get(2)).allSatisfy( value -> assertThat((Long)value).isBetween(0L, 5L)); + } + + @Test + @EnabledOnCommand("HEXPIRE") + public void hExpireReturnsMinusTwoWhenFieldDoesNotExist() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hExpire("hash-hexpire", 5L, "missking-field")); + actual.add(connection.hExpire("missing-key", 5L, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(-2L), List.of(-2L))); + } + + @Test + @EnabledOnCommand("HEXPIRE") + public void hExpireReturnsTwoWhenZeroProvided() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hExpire("hash-hexpire", 0, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(2L))); + } + + @Test + @EnabledOnCommand("HPEXPIRE") + public void hpExpireReturnsSuccessAndSetsTTL() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hpExpire("hash-hexpire", 5000L, "key-2")); + actual.add(connection.hTtl("hash-hexpire", TimeUnit.MILLISECONDS,"key-2")); + + List results = getResults(); + assertThat(results.get(0)).isEqualTo(Boolean.TRUE); + assertThat((List) results.get(1)).contains(1L); + assertThat((List) results.get(2)).allSatisfy( value -> assertThat((Long)value).isBetween(0L, 5000L)); + } + + @Test + @EnabledOnCommand("HPEXPIRE") + public void hpExpireReturnsMinusTwoWhenFieldDoesNotExist() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hpExpire("hash-hexpire", 5L, "missing-field")); + actual.add(connection.hpExpire("missing-key", 5L, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(-2L), List.of(-2L))); + } + + @Test + @EnabledOnCommand("HPEXPIRE") + public void hpExpireReturnsTwoWhenZeroProvided() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + 
actual.add(connection.hpExpire("hash-hexpire", 0, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(2L))); + } + + @Test + @EnabledOnCommand("HEXPIREAT") + public void hExpireAtReturnsSuccessAndSetsTTL() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); + + actual.add(connection.hExpireAt("hash-hexpire", inFiveSeconds, "key-2")); + actual.add(connection.hTtl("hash-hexpire", "key-2")); + + List results = getResults(); + assertThat(results.get(0)).isEqualTo(Boolean.TRUE); + assertThat((List) results.get(1)).contains(1L); + assertThat((List) results.get(2)).allSatisfy( value -> assertThat((Long)value).isBetween(0L, 5L)); + } + + @Test + @EnabledOnCommand("HEXPIREAT") + public void hExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); + + actual.add(connection.hpExpire("hash-hexpire", inFiveSeconds, "missing-field")); + actual.add(connection.hpExpire("missing-key", inFiveSeconds, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(-2L), List.of(-2L))); + } + + @Test + @EnabledOnCommand("HEXPIREAT") + public void hExpireAtReturnsTwoWhenZeroProvided() { + long fiveSecondsAgo = Instant.now().minusSeconds(5L).getEpochSecond(); + + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hExpireAt("hash-hexpire", fiveSecondsAgo, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(2L))); + } + + @Test + @EnabledOnCommand("HEXPIREAT") + public void hpExpireAtReturnsSuccessAndSetsTTL() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); + + actual.add(connection.hpExpireAt("hash-hexpire", inFiveSeconds, "key-2")); + actual.add(connection.hTtl("hash-hexpire", "key-2")); + + List results = getResults(); 
+ assertThat(results.get(0)).isEqualTo(Boolean.TRUE); + assertThat((List) results.get(1)).contains(1L); + assertThat((List) results.get(2)).allSatisfy( value -> assertThat((Long)value).isBetween(0L, 5L)); + } + + @Test + @EnabledOnCommand("HEXPIREAT") + public void hpExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); + + actual.add(connection.hpExpireAt("hash-hexpire", inFiveSeconds, "missing-field")); + actual.add(connection.hpExpireAt("missing-key", inFiveSeconds, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(-2L), List.of(-2L))); + } + + @Test + @EnabledOnCommand("HPEXPIREAT") + public void hpExpireAdReturnsTwoWhenZeroProvided() { + long fiveSecondsAgo = Instant.now().minusSeconds(5L).getEpochSecond(); + + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hpExpireAt("hash-hexpire", fiveSecondsAgo, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(2L))); + } + + @Test + @EnabledOnCommand("HPERSIST") + public void hPersistReturnsSuccessAndPersistsField() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hExpire("hash-hexpire", 5L, "key-2")); + actual.add(connection.hPersist("hash-hexpire", "key-2")); + actual.add(connection.hTtl("hash-hexpire", "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(1L), List.of(1L), List.of(-1L))); + } + + @Test + @EnabledOnCommand("HPERSIST") + public void hPersistReturnsMinusOneWhenFieldDoesNotHaveExpiration() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hPersist("hash-hexpire", "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(-1L))); + } + + @Test + @EnabledOnCommand("HPERSIST") + public void hPersistReturnsMinusTwoWhenFieldOrKeyMissing() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + 
actual.add(connection.hPersist("hash-hexpire", "missing-field")); + actual.add(connection.hPersist("missing-key", "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(-2L), List.of(-2L))); + } + + @Test + @EnabledOnCommand("HTTL") + public void hTtlReturnsMinusOneWhenFieldHasNoExpiration() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hTtl("hash-hexpire", "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(-1L))); + } + + @Test + @EnabledOnCommand("HTTL") + public void hTtlReturnsMinusTwoWhenFieldOrKeyMissing() { + actual.add(connection.hTtl("hash-hexpire", "missing-field")); + actual.add(connection.hTtl("missing-key", "key-2")); + + verifyResults(Arrays.asList(new Object[] { List.of(-2L), List.of(-2L) })); + } + @Test // DATAREDIS-694 void touchReturnsNrOfKeysTouched() { diff --git a/src/test/java/org/springframework/data/redis/connection/jedis/JedisClusterConnectionTests.java b/src/test/java/org/springframework/data/redis/connection/jedis/JedisClusterConnectionTests.java index 6fe6eb43c1..022d2e6a52 100644 --- a/src/test/java/org/springframework/data/redis/connection/jedis/JedisClusterConnectionTests.java +++ b/src/test/java/org/springframework/data/redis/connection/jedis/JedisClusterConnectionTests.java @@ -36,6 +36,7 @@ import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.time.Duration; +import java.time.Instant; import java.util.*; import java.util.concurrent.TimeUnit; @@ -84,6 +85,7 @@ * @author Mark Paluch * @author Pavel Khokhlov * @author Dennis Neufeld + * @author Tihomir Mateev */ @EnabledOnRedisClusterAvailable @ExtendWith(JedisExtension.class) @@ -1038,6 +1040,148 @@ public void hStrLenReturnsZeroWhenKeyDoesNotExist() { assertThat(clusterConnection.hashCommands().hStrLen(KEY_1_BYTES, KEY_1_BYTES)).isEqualTo(0L); } + @Test + public void hExpireReturnsSuccessAndSetsTTL() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + 
assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_2_BYTES)) + .allSatisfy(val -> assertThat(val).isBetween(0L, 5L)); + } + + @Test + public void hExpireReturnsMinusTwoWhenFieldDoesNotExist() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + // missing field + assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_1_BYTES)).contains(-2L); + // missing key + assertThat(clusterConnection.hashCommands().hExpire(KEY_2_BYTES, 5L, KEY_2_BYTES)).contains(-2L); + } + + @Test + public void hExpireReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test + public void hpExpireReturnsSuccessAndSetsTTL() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 5000L, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, TimeUnit.MILLISECONDS,KEY_2_BYTES)) + .allSatisfy(val -> assertThat(val).isBetween(0L, 5000L)); + } + + @Test + public void hpExpireReturnsMinusTwoWhenFieldDoesNotExist() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + // missing field + assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 5L, KEY_1_BYTES)).contains(-2L); + // missing key + assertThat(clusterConnection.hashCommands().hpExpire(KEY_2_BYTES, 5L, KEY_2_BYTES)).contains(-2L); + } + + @Test + public void hpExpireReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test + public void hExpireAtReturnsSuccessAndSetsTTL() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); + 
assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_2_BYTES)).allSatisfy(val -> assertThat(val).isBetween(0L, 5L)); + } + + @Test + public void hExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); + + // missing field + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_1_BYTES)).contains(-2L); + // missing key + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_2_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(-2L); + } + + @Test + public void hExpireAdReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test + public void hpExpireAtReturnsSuccessAndSetsTTL() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, TimeUnit.MILLISECONDS, KEY_2_BYTES)) + .allSatisfy(val -> assertThat(val).isBetween(0L, 5000L)); + } + + @Test + public void hpExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); + + // missing field + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_1_BYTES)).contains(-2L); + // missing key + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_2_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(-2L); + } + + @Test + public void hpExpireAdReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + 
assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test + public void hPersistReturnsSuccessAndPersistsField() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); + } + + @Test + public void hPersistReturnsMinusOneWhenFieldDoesNotHaveExpiration() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); + } + + @Test + public void hPersistReturnsMinusTwoWhenFieldOrKeyMissing() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_1_BYTES)).contains(-2L); + assertThat(clusterConnection.hashCommands().hPersist(KEY_3_BYTES,KEY_2_BYTES)).contains(-2L); + + } + + @Test + public void hTtlReturnsMinusOneWhenFieldHasNoExpiration() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); + + } + + @Test + public void hTtlReturnsMinusTwoWhenFieldOrKeyMissing() { + + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_1_BYTES)).contains(-2L); + assertThat(clusterConnection.hashCommands().hTtl(KEY_3_BYTES,KEY_2_BYTES)).contains(-2L); + + } + @Test // DATAREDIS-315 public void hValsShouldRetrieveValuesCorrectly() { diff --git a/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java b/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java index b1fe07dae6..c779322576 100644 --- a/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java +++ 
b/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java @@ -32,6 +32,7 @@ import java.nio.charset.StandardCharsets; import java.time.Duration; +import java.time.Instant; import java.util.*; import java.util.concurrent.TimeUnit; @@ -73,6 +74,7 @@ * @author Christoph Strobl * @author Mark Paluch * @author Dennis Neufeld + * @author Tihomir Mateev */ @SuppressWarnings("deprecation") @EnabledOnRedisClusterAvailable @@ -1095,6 +1097,147 @@ public void hStrLenReturnsZeroWhenKeyDoesNotExist() { assertThat(clusterConnection.hashCommands().hStrLen(KEY_1_BYTES, KEY_1_BYTES)).isEqualTo(0L); } + @Test + public void hExpireReturnsSuccessAndSetsTTL() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hTtl(KEY_1_BYTES, KEY_2_BYTES)).allSatisfy(val -> assertThat(val).isBetween(0L, 5L)); + } + + @Test + public void hExpireReturnsMinusTwoWhenFieldDoesNotExist() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + // missing field + assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_1_BYTES)).contains(-2L); + // missing key + assertThat(clusterConnection.hashCommands().hExpire(KEY_2_BYTES, 5L, KEY_2_BYTES)).contains(-2L); + } + + @Test + public void hExpireReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test + public void hpExpireReturnsSuccessAndSetsTTL() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 5000L, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hTtl(KEY_1_BYTES, TimeUnit.MILLISECONDS,KEY_2_BYTES)) + .allSatisfy(val -> assertThat(val).isBetween(0L, 5000L)); + } + + @Test + public void hpExpireReturnsMinusTwoWhenFieldDoesNotExist() { + 
nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + // missing field + assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 5L, KEY_1_BYTES)).contains(-2L); + // missing key + assertThat(clusterConnection.hashCommands().hpExpire(KEY_2_BYTES, 5L, KEY_2_BYTES)).contains(-2L); + } + + @Test + public void hpExpireReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test + public void hExpireAtReturnsSuccessAndSetsTTL() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hTtl(KEY_1_BYTES, KEY_2_BYTES)).allSatisfy(val -> assertThat(val).isBetween(0L, 5L)); + } + + @Test + public void hExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); + + // missing field + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_1_BYTES)).contains(-2L); + // missing key + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_2_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(-2L); + } + + @Test + public void hExpireAdReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test + public void hpExpireAtReturnsSuccessAndSetsTTL() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hTtl(KEY_1_BYTES, TimeUnit.MILLISECONDS, KEY_2_BYTES)) + .allSatisfy(val -> 
assertThat(val).isBetween(0L, 5000L)); + } + + @Test + public void hpExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); + + // missing field + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_1_BYTES)).contains(-2L); + // missing key + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_2_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(-2L); + } + + @Test + public void hpExpireAdReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test + public void hPersistReturnsSuccessAndPersistsField() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hTtl(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); + } + + @Test + public void hPersistReturnsMinusOneWhenFieldDoesNotHaveExpiration() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); + } + + @Test + public void hPersistReturnsMinusTwoWhenFieldOrKeyMissing() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_1_BYTES)).contains(-2L); + assertThat(clusterConnection.hashCommands().hPersist(KEY_3_BYTES,KEY_2_BYTES)).contains(-2L); + + } + + @Test + public void hTtlReturnsMinusOneWhenFieldHasNoExpiration() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); + + } + + @Test + public void hTtlReturnsMinusTwoWhenFieldOrKeyMissing() { + + 
assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_1_BYTES)).contains(-2L); + assertThat(clusterConnection.hashCommands().hTtl(KEY_3_BYTES,KEY_2_BYTES)).contains(-2L); + + } + @Test // DATAREDIS-315 public void hValsShouldRetrieveValuesCorrectly() { diff --git a/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommandsIntegrationTests.java b/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommandsIntegrationTests.java index 973e061f90..86b3ca74f1 100644 --- a/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommandsIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommandsIntegrationTests.java @@ -21,6 +21,8 @@ import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.time.Instant; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -35,6 +37,7 @@ * * @author Christoph Strobl * @author Mark Paluch + * @author Tihomir Mateev */ public class LettuceReactiveHashCommandsIntegrationTests extends LettuceReactiveCommandsTestSupport { @@ -288,4 +291,61 @@ void hStrLenReturnsZeroWhenKeyDoesNotExist() { connection.hashCommands().hStrLen(KEY_1_BBUFFER, FIELD_1_BBUFFER).as(StepVerifier::create).expectNext(0L) // .verifyComplete(); } + + @ParameterizedRedisTest + void hExpireShouldHandleMultipleParametersCorrectly() { + assertThat(nativeCommands.hset(KEY_1, FIELD_1, VALUE_1)).isTrue(); + assertThat(nativeCommands.hset(KEY_1, FIELD_2, VALUE_2)).isTrue(); + final var fields = Arrays.asList(FIELD_1_BBUFFER, FIELD_2_BBUFFER, FIELD_3_BBUFFER); + + connection.hashCommands().hExpire(KEY_1_BBUFFER, Duration.ofSeconds(1), fields).as(StepVerifier::create) // + .expectNext(1L) + .expectNext(1L) + .expectNext(-2L) + .expectComplete() + .verify(); + + assertThat(nativeCommands.httl(KEY_1, FIELD_1)).allSatisfy(it -> 
assertThat(it).isBetween(0L, 1000L)); + assertThat(nativeCommands.httl(KEY_1, FIELD_2)).allSatisfy(it -> assertThat(it).isBetween(0L, 1000L)); + assertThat(nativeCommands.httl(KEY_1, FIELD_3)).allSatisfy(it -> assertThat(it).isEqualTo(-2L)); + + } + + @ParameterizedRedisTest + void hExpireAtShouldHandleMultipleParametersCorrectly() { + assertThat(nativeCommands.hset(KEY_1, FIELD_1, VALUE_1)).isTrue(); + assertThat(nativeCommands.hset(KEY_1, FIELD_2, VALUE_2)).isTrue(); + final var fields = Arrays.asList(FIELD_1_BBUFFER, FIELD_2_BBUFFER, FIELD_3_BBUFFER); + + connection.hashCommands().hExpireAt(KEY_1_BBUFFER, Instant.now().plusSeconds(1), fields).as(StepVerifier::create) // + .expectNext(1L) + .expectNext(1L) + .expectNext(-2L) + .expectComplete() + .verify(); + + assertThat(nativeCommands.httl(KEY_1, FIELD_1, FIELD_2)).allSatisfy(it -> assertThat(it).isBetween(0L, 1000L)); + assertThat(nativeCommands.httl(KEY_1, FIELD_3)).allSatisfy(it -> assertThat(it).isEqualTo(-2L)); + + } + + @ParameterizedRedisTest + void hPersistShouldPersistFields() { + assertThat(nativeCommands.hset(KEY_1, FIELD_1, VALUE_1)).isTrue(); + assertThat(nativeCommands.hset(KEY_1, FIELD_2, VALUE_2)).isTrue(); + + assertThat(nativeCommands.hexpire(KEY_1, 1000, FIELD_1)) + .allSatisfy(it -> assertThat(it).isEqualTo(1L)); + + final var fields = Arrays.asList(FIELD_1_BBUFFER, FIELD_2_BBUFFER, FIELD_3_BBUFFER); + + connection.hashCommands().hPersist(KEY_1_BBUFFER, fields).as(StepVerifier::create) // + .expectNext(1L) + .expectNext(-1L) + .expectNext(-2L) + .expectComplete() + .verify(); + + assertThat(nativeCommands.httl(KEY_1, FIELD_1, FIELD_2)).allSatisfy(it -> assertThat(it).isEqualTo(-1L)); + } } diff --git a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java index 8a2b7065ad..ef9737d7a4 100644 --- 
a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java @@ -19,9 +19,13 @@ import static org.assertj.core.api.Assumptions.*; import java.io.IOException; +import java.time.Duration; +import java.time.Instant; import java.util.Arrays; import java.util.Collection; +import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; import org.junit.jupiter.api.BeforeEach; @@ -39,6 +43,7 @@ * * @author Jennifer Hickey * @author Christoph Strobl + * @author Tihomir Mateev * @param Key type * @param Hash key type * @param Hash value type @@ -202,4 +207,79 @@ void randomValue() { Map values = hashOps.randomEntries(key, 10); assertThat(values).hasSize(2).containsEntry(key1, val1).containsEntry(key2, val2); } + + @ParameterizedRedisTest + void testExpireAndGetExpireMillis() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + hashOps.put(key, key1, val1); + hashOps.put(key, key2, val2); + + assertThat(redisTemplate.opsForHash().expire(key, Duration.ofMillis(500), List.of(key1))) + .containsExactly(1L); + + assertThat(redisTemplate.opsForHash().getExpire(key, List.of(key1))) + .allSatisfy(it -> assertThat(it).isBetween(0L, 500L)); + } + + @ParameterizedRedisTest + void testExpireAndGetExpireSeconds() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + hashOps.put(key, key1, val1); + hashOps.put(key, key2, val2); + + assertThat(redisTemplate.opsForHash().expire(key, Duration.ofSeconds(5), List.of(key1, key2))) + .containsExactly(1L, 1L); + + assertThat(redisTemplate.opsForHash().getExpire(key, TimeUnit.SECONDS, List.of(key1, key2))) + 
.allSatisfy(it -> assertThat(it).isBetween(0L, 5L)); + } + + @ParameterizedRedisTest + void testExpireAtAndGetExpireMillis() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + hashOps.put(key, key1, val1); + hashOps.put(key, key2, val2); + + assertThat(redisTemplate.opsForHash().expireAt(key, Instant.now().plusMillis(500), List.of(key1, key2))) + .containsExactly(1L, 1L); + + assertThat(redisTemplate.opsForHash().getExpire(key, List.of(key1, key2))) + .allSatisfy(it -> assertThat(it).isBetween(0L, 500L)); + } + + @ParameterizedRedisTest + void testPersistAndGetExpireMillis() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + hashOps.put(key, key1, val1); + hashOps.put(key, key2, val2); + + assertThat(redisTemplate.opsForHash().expireAt(key, Instant.now().plusMillis(500), List.of(key1, key2))) + .containsExactly(1L, 1L); + + assertThat(redisTemplate.opsForHash().persist(key, List.of(key1, key2))) + .allSatisfy(it -> assertThat(it).isEqualTo(1L)); + + assertThat(redisTemplate.opsForHash().getExpire(key, List.of(key1, key2))) + .allSatisfy(it -> assertThat(it).isEqualTo(-1L)); + } } diff --git a/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java b/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java index 435d5b4500..0f364c45c6 100644 --- a/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java @@ -20,14 +20,18 @@ import java.io.IOException; import java.text.DecimalFormat; +import java.time.Duration; +import 
java.time.Instant; import java.util.AbstractMap; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.assertj.core.api.Assumptions; import org.junit.jupiter.api.BeforeEach; @@ -190,6 +194,34 @@ void testIncrement() { assertThat(map.increment(k1, 10)).isEqualTo(Long.valueOf(Long.valueOf((String) v1) + 10)); } + @ParameterizedRedisTest + void testExpire() { + K k1 = getKey(); + V v1 = getValue(); + assertThat(map.put(k1, v1)).isEqualTo(null); + + Collection keys = Collections.singletonList(k1); + assertThat(map.expire(Duration.ofSeconds(5), keys)).contains(1L); + assertThat(map.getExpire(keys)).allSatisfy(expiration -> assertThat(expiration).isBetween(1L, 5L)); + assertThat(map.getExpire(TimeUnit.MILLISECONDS, keys)) + .allSatisfy(expiration -> assertThat(expiration).isBetween(1000L, 5000L)); + assertThat(map.persist(keys)).contains(1L); + } + + @ParameterizedRedisTest + void testExpireAt() { + K k1 = getKey(); + V v1 = getValue(); + assertThat(map.put(k1, v1)).isEqualTo(null); + + Collection keys = Collections.singletonList(k1); + assertThat(map.expireAt(Instant.now().plusSeconds(5), keys)).contains(1L); + assertThat(map.getExpire(keys)).allSatisfy(expiration -> assertThat(expiration).isBetween(1L, 5L)); + assertThat(map.getExpire(TimeUnit.MILLISECONDS, keys)) + .allSatisfy(expiration -> assertThat(expiration).isBetween(1000L, 5000L)); + assertThat(map.persist(keys)).contains(1L); + } + @ParameterizedRedisTest void testIncrementDouble() { assumeThat(valueFactory instanceof DoubleAsStringObjectFactory).isTrue(); @@ -496,4 +528,5 @@ public void randomEntryFromHash() { assertThat(map.randomEntry()).isIn(new AbstractMap.SimpleImmutableEntry(k1, v1), new AbstractMap.SimpleImmutableEntry(k2, v2)); } + } From b03bc2d122ea4cffa7bbcbf7308d2df1c5392f76 Mon 
Sep 17 00:00:00 2001 From: Christoph Strobl Date: Mon, 3 Feb 2025 11:05:47 +0100 Subject: [PATCH 03/13] Guard Tests Make sure tests do not run when targeting older server versions. --- .../jedis/JedisClusterConnectionTests.java | 18 +++++++++++++++++- .../lettuce/LettuceClusterConnectionTests.java | 18 +++++++++++++++++- ...ceReactiveHashCommandsIntegrationTests.java | 4 ++++ .../DefaultHashOperationsIntegrationTests.java | 5 +++++ .../AbstractRedisMapIntegrationTests.java | 2 ++ 5 files changed, 45 insertions(+), 2 deletions(-) diff --git a/src/test/java/org/springframework/data/redis/connection/jedis/JedisClusterConnectionTests.java b/src/test/java/org/springframework/data/redis/connection/jedis/JedisClusterConnectionTests.java index 022d2e6a52..4e41e60954 100644 --- a/src/test/java/org/springframework/data/redis/connection/jedis/JedisClusterConnectionTests.java +++ b/src/test/java/org/springframework/data/redis/connection/jedis/JedisClusterConnectionTests.java @@ -1041,6 +1041,7 @@ public void hStrLenReturnsZeroWhenKeyDoesNotExist() { } @Test + @EnabledOnCommand("HEXPIRE") public void hExpireReturnsSuccessAndSetsTTL() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); @@ -1050,6 +1051,7 @@ public void hExpireReturnsSuccessAndSetsTTL() { } @Test + @EnabledOnCommand("HEXPIRE") public void hExpireReturnsMinusTwoWhenFieldDoesNotExist() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); // missing field @@ -1059,6 +1061,7 @@ public void hExpireReturnsMinusTwoWhenFieldDoesNotExist() { } @Test + @EnabledOnCommand("HEXPIRE") public void hExpireReturnsTwoWhenZeroProvided() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); @@ -1066,6 +1069,7 @@ public void hExpireReturnsTwoWhenZeroProvided() { } @Test + @EnabledOnCommand("HEXPIRE") public void hpExpireReturnsSuccessAndSetsTTL() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); @@ -1075,6 +1079,7 @@ public void hpExpireReturnsSuccessAndSetsTTL() { } @Test + @EnabledOnCommand("HEXPIRE") public void 
hpExpireReturnsMinusTwoWhenFieldDoesNotExist() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); // missing field @@ -1084,6 +1089,7 @@ public void hpExpireReturnsMinusTwoWhenFieldDoesNotExist() { } @Test + @EnabledOnCommand("HEXPIRE") public void hpExpireReturnsTwoWhenZeroProvided() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); @@ -1091,6 +1097,7 @@ public void hpExpireReturnsTwoWhenZeroProvided() { } @Test + @EnabledOnCommand("HEXPIRE") public void hExpireAtReturnsSuccessAndSetsTTL() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); @@ -1099,6 +1106,7 @@ public void hExpireAtReturnsSuccessAndSetsTTL() { } @Test + @EnabledOnCommand("HEXPIRE") public void hExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); @@ -1110,6 +1118,7 @@ public void hExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { } @Test + @EnabledOnCommand("HEXPIRE") public void hExpireAdReturnsTwoWhenZeroProvided() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); @@ -1117,6 +1126,7 @@ public void hExpireAdReturnsTwoWhenZeroProvided() { } @Test + @EnabledOnCommand("HEXPIRE") public void hpExpireAtReturnsSuccessAndSetsTTL() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); @@ -1126,6 +1136,7 @@ public void hpExpireAtReturnsSuccessAndSetsTTL() { } @Test + @EnabledOnCommand("HEXPIRE") public void hpExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); @@ -1137,6 +1148,7 @@ public void hpExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { } @Test + @EnabledOnCommand("HEXPIRE") public void hpExpireAdReturnsTwoWhenZeroProvided() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); @@ -1144,6 +1156,7 @@ public void hpExpireAdReturnsTwoWhenZeroProvided() { } @Test + 
@EnabledOnCommand("HEXPIRE") public void hPersistReturnsSuccessAndPersistsField() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_2_BYTES)).contains(1L); @@ -1152,12 +1165,14 @@ public void hPersistReturnsSuccessAndPersistsField() { } @Test + @EnabledOnCommand("HEXPIRE") public void hPersistReturnsMinusOneWhenFieldDoesNotHaveExpiration() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); } @Test + @EnabledOnCommand("HEXPIRE") public void hPersistReturnsMinusTwoWhenFieldOrKeyMissing() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); @@ -1167,14 +1182,15 @@ public void hPersistReturnsMinusTwoWhenFieldOrKeyMissing() { } @Test + @EnabledOnCommand("HEXPIRE") public void hTtlReturnsMinusOneWhenFieldHasNoExpiration() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); - } @Test + @EnabledOnCommand("HEXPIRE") public void hTtlReturnsMinusTwoWhenFieldOrKeyMissing() { assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_1_BYTES)).contains(-2L); diff --git a/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java b/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java index c779322576..171cdda3b8 100644 --- a/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java +++ b/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java @@ -1098,6 +1098,7 @@ public void hStrLenReturnsZeroWhenKeyDoesNotExist() { } @Test + @EnabledOnCommand("HEXPIRE") public void hExpireReturnsSuccessAndSetsTTL() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); @@ -1106,6 +1107,7 @@ public void hExpireReturnsSuccessAndSetsTTL() { } @Test + @EnabledOnCommand("HEXPIRE") public 
void hExpireReturnsMinusTwoWhenFieldDoesNotExist() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); // missing field @@ -1115,6 +1117,7 @@ public void hExpireReturnsMinusTwoWhenFieldDoesNotExist() { } @Test + @EnabledOnCommand("HEXPIRE") public void hExpireReturnsTwoWhenZeroProvided() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); @@ -1122,6 +1125,7 @@ public void hExpireReturnsTwoWhenZeroProvided() { } @Test + @EnabledOnCommand("HEXPIRE") public void hpExpireReturnsSuccessAndSetsTTL() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); @@ -1131,6 +1135,7 @@ public void hpExpireReturnsSuccessAndSetsTTL() { } @Test + @EnabledOnCommand("HEXPIRE") public void hpExpireReturnsMinusTwoWhenFieldDoesNotExist() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); // missing field @@ -1140,6 +1145,7 @@ public void hpExpireReturnsMinusTwoWhenFieldDoesNotExist() { } @Test + @EnabledOnCommand("HEXPIRE") public void hpExpireReturnsTwoWhenZeroProvided() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); @@ -1147,6 +1153,7 @@ public void hpExpireReturnsTwoWhenZeroProvided() { } @Test + @EnabledOnCommand("HEXPIRE") public void hExpireAtReturnsSuccessAndSetsTTL() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); @@ -1155,6 +1162,7 @@ public void hExpireAtReturnsSuccessAndSetsTTL() { } @Test + @EnabledOnCommand("HEXPIRE") public void hExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); @@ -1166,6 +1174,7 @@ public void hExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { } @Test + @EnabledOnCommand("HEXPIRE") public void hExpireAdReturnsTwoWhenZeroProvided() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); @@ -1173,6 +1182,7 @@ public void hExpireAdReturnsTwoWhenZeroProvided() { } @Test + @EnabledOnCommand("HEXPIRE") public void hpExpireAtReturnsSuccessAndSetsTTL() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); long 
inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); @@ -1182,6 +1192,7 @@ public void hpExpireAtReturnsSuccessAndSetsTTL() { } @Test + @EnabledOnCommand("HEXPIRE") public void hpExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); @@ -1193,6 +1204,7 @@ public void hpExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { } @Test + @EnabledOnCommand("HEXPIRE") public void hpExpireAdReturnsTwoWhenZeroProvided() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); @@ -1200,6 +1212,7 @@ public void hpExpireAdReturnsTwoWhenZeroProvided() { } @Test + @EnabledOnCommand("HEXPIRE") public void hPersistReturnsSuccessAndPersistsField() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_2_BYTES)).contains(1L); @@ -1208,12 +1221,14 @@ public void hPersistReturnsSuccessAndPersistsField() { } @Test + @EnabledOnCommand("HEXPIRE") public void hPersistReturnsMinusOneWhenFieldDoesNotHaveExpiration() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); } @Test + @EnabledOnCommand("HEXPIRE") public void hPersistReturnsMinusTwoWhenFieldOrKeyMissing() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); @@ -1223,6 +1238,7 @@ public void hPersistReturnsMinusTwoWhenFieldOrKeyMissing() { } @Test + @EnabledOnCommand("HEXPIRE") public void hTtlReturnsMinusOneWhenFieldHasNoExpiration() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); @@ -1231,11 +1247,11 @@ public void hTtlReturnsMinusOneWhenFieldHasNoExpiration() { } @Test + @EnabledOnCommand("HEXPIRE") public void hTtlReturnsMinusTwoWhenFieldOrKeyMissing() { assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_1_BYTES)).contains(-2L); assertThat(clusterConnection.hashCommands().hTtl(KEY_3_BYTES,KEY_2_BYTES)).contains(-2L); - } @Test // DATAREDIS-315 diff --git 
a/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommandsIntegrationTests.java b/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommandsIntegrationTests.java index 86b3ca74f1..4ef5fcffe3 100644 --- a/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommandsIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommandsIntegrationTests.java @@ -17,6 +17,7 @@ import static org.assertj.core.api.Assertions.*; +import org.springframework.data.redis.test.condition.EnabledOnCommand; import reactor.test.StepVerifier; import java.nio.ByteBuffer; @@ -293,6 +294,7 @@ void hStrLenReturnsZeroWhenKeyDoesNotExist() { } @ParameterizedRedisTest + @EnabledOnCommand("HEXPIRE") void hExpireShouldHandleMultipleParametersCorrectly() { assertThat(nativeCommands.hset(KEY_1, FIELD_1, VALUE_1)).isTrue(); assertThat(nativeCommands.hset(KEY_1, FIELD_2, VALUE_2)).isTrue(); @@ -312,6 +314,7 @@ void hExpireShouldHandleMultipleParametersCorrectly() { } @ParameterizedRedisTest + @EnabledOnCommand("HEXPIRE") void hExpireAtShouldHandleMultipleParametersCorrectly() { assertThat(nativeCommands.hset(KEY_1, FIELD_1, VALUE_1)).isTrue(); assertThat(nativeCommands.hset(KEY_1, FIELD_2, VALUE_2)).isTrue(); @@ -330,6 +333,7 @@ void hExpireAtShouldHandleMultipleParametersCorrectly() { } @ParameterizedRedisTest + @EnabledOnCommand("HEXPIRE") void hPersistShouldPersistFields() { assertThat(nativeCommands.hset(KEY_1, FIELD_1, VALUE_1)).isTrue(); assertThat(nativeCommands.hset(KEY_1, FIELD_2, VALUE_2)).isTrue(); diff --git a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java index ef9737d7a4..b9643dd331 100644 --- a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java +++ 
b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java @@ -34,6 +34,7 @@ import org.springframework.data.redis.StringObjectFactory; import org.springframework.data.redis.connection.jedis.JedisConnectionFactory; import org.springframework.data.redis.connection.jedis.extension.JedisConnectionFactoryExtension; +import org.springframework.data.redis.test.condition.EnabledOnCommand; import org.springframework.data.redis.test.extension.RedisStanalone; import org.springframework.data.redis.test.extension.parametrized.MethodSource; import org.springframework.data.redis.test.extension.parametrized.ParameterizedRedisTest; @@ -208,6 +209,7 @@ void randomValue() { assertThat(values).hasSize(2).containsEntry(key1, val1).containsEntry(key2, val2); } + @EnabledOnCommand("HEXPIRE") @ParameterizedRedisTest void testExpireAndGetExpireMillis() { @@ -227,6 +229,7 @@ void testExpireAndGetExpireMillis() { } @ParameterizedRedisTest + @EnabledOnCommand("HEXPIRE") void testExpireAndGetExpireSeconds() { K key = keyFactory.instance(); @@ -245,6 +248,7 @@ void testExpireAndGetExpireSeconds() { } @ParameterizedRedisTest + @EnabledOnCommand("HEXPIRE") void testExpireAtAndGetExpireMillis() { K key = keyFactory.instance(); @@ -263,6 +267,7 @@ void testExpireAtAndGetExpireMillis() { } @ParameterizedRedisTest + @EnabledOnCommand("HEXPIRE") void testPersistAndGetExpireMillis() { K key = keyFactory.instance(); diff --git a/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java b/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java index 0f364c45c6..b131a782d5 100644 --- a/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java @@ -195,6 +195,7 @@ void testIncrement() { } @ParameterizedRedisTest + 
@EnabledOnCommand("HEXPIRE") void testExpire() { K k1 = getKey(); V v1 = getValue(); @@ -209,6 +210,7 @@ void testExpire() { } @ParameterizedRedisTest + @EnabledOnCommand("HEXPIRE") void testExpireAt() { K k1 = getKey(); V v1 = getValue(); From fa81f5eb8da900a6dec80ab06b3315907c552316 Mon Sep 17 00:00:00 2001 From: Christoph Strobl Date: Wed, 5 Feb 2025 13:49:34 +0100 Subject: [PATCH 04/13] Introduce dedicated objects for time to live and status changes. This commit introduces several value objects capturing keys and their respective time to live/status change values. --- .../DefaultStringRedisConnection.java | 10 + .../connection/DefaultedRedisConnection.java | 7 + .../redis/connection/RedisHashCommands.java | 83 ++++- .../redis/connection/RedisKeyCommands.java | 59 ++++ .../connection/StringRedisConnection.java | 23 +- .../jedis/JedisClusterHashCommands.java | 12 + .../connection/jedis/JedisHashCommands.java | 5 + .../lettuce/LettuceHashCommands.java | 6 + .../data/redis/core/BoundHashOperations.java | 10 +- .../redis/core/DefaultHashOperations.java | 71 ++-- .../core/DefaultReactiveHashOperations.java | 94 +++++- .../data/redis/core/Expirations.java | 303 ++++++++++++++++++ .../data/redis/core/ExpireChanges.java | 195 +++++++++++ .../data/redis/core/HashOperations.java | 13 +- .../redis/core/ReactiveHashOperations.java | 73 +++++ .../data/redis/core/TimeoutUtils.java | 6 +- .../support/collections/DefaultRedisMap.java | 13 +- .../redis/support/collections/RedisMap.java | 13 +- .../support/collections/RedisProperties.java | 27 +- .../AbstractConnectionIntegrationTests.java | 29 +- .../LettuceClusterConnectionTests.java | 21 +- ...DefaultHashOperationsIntegrationTests.java | 73 +++-- ...eactiveHashOperationsIntegrationTests.java | 122 ++++++- .../data/redis/core/ExpirationsUnitTest.java | 94 ++++++ .../AbstractRedisMapIntegrationTests.java | 34 +- 25 files changed, 1274 insertions(+), 122 deletions(-) create mode 100644 
src/main/java/org/springframework/data/redis/core/Expirations.java create mode 100644 src/main/java/org/springframework/data/redis/core/ExpireChanges.java create mode 100644 src/test/java/org/springframework/data/redis/core/ExpirationsUnitTest.java diff --git a/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java b/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java index cb815eb412..77df3bc886 100644 --- a/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java +++ b/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java @@ -2596,6 +2596,11 @@ public List hTtl(byte[] key, byte[]... fields) { return this.delegate.hTtl(key, fields); } + @Override + public List hpTtl(byte[] key, byte[]... fields) { + return this.delegate.hpTtl(key, fields); + } + @Override public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) { return this.delegate.hTtl(key, timeUnit, fields); @@ -2636,6 +2641,11 @@ public List hTtl(String key, TimeUnit timeUnit, String... fields) { return hTtl(serialize(key), timeUnit, serializeMulti(fields)); } + @Override + public List hpTtl(String key, String... fields) { + return hpTtl(serialize(key), serializeMulti(fields)); + } + @Override public void setClientName(byte[] name) { this.delegate.setClientName(name); diff --git a/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java b/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java index ddbcc68d2c..4ba3d3ec0f 100644 --- a/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java +++ b/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java @@ -1520,6 +1520,13 @@ default List hTtl(byte[] key, TimeUnit timeUnit, byte[]...
fields) { return hashCommands().hTtl(key, timeUnit, fields); } + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hpTtl(byte[] key, byte[]... fields) { + return hashCommands().hpTtl(key, fields); + } + // GEO COMMANDS /** @deprecated in favor of {@link RedisConnection#geoCommands()}}. */ diff --git a/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java b/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java index 066833d52d..7af2be01ef 100644 --- a/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java @@ -15,6 +15,7 @@ */ package org.springframework.data.redis.connection; +import java.time.Duration; import java.util.List; import java.util.Map; import java.util.Set; @@ -253,37 +254,73 @@ public interface RedisHashCommands { Long hStrLen(byte[] key, byte[] field); /** - * Set time to live for given {@code field} in seconds. + * Set time to live for given {@code fields} in seconds. * * @param key must not be {@literal null}. - * @param seconds the amount of time after which the key will be expired in seconds, must not be {@literal null}. + * @param seconds the amount of time after which the fields will be expired in seconds, must not be {@literal null}. * @param fields must not be {@literal null}. * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. 
* @see Redis Documentation: HEXPIRE - * @since 3.4 + * @since 3.5 */ @Nullable List hExpire(byte[] key, long seconds, byte[]... fields); /** - * Set time to live for given {@code field} in milliseconds. + * Set time to live for given {@code fields}. * * @param key must not be {@literal null}. - * @param millis the amount of time after which the key will be expired in milliseconds, must not be {@literal null}. + * @param ttl the amount of time after which the fields will be expired in {@link Duration#toSeconds() seconds} precision, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + default List hExpire(byte[] key, Duration ttl, byte[]... fields) { + return hExpire(key, ttl.toSeconds(), fields); + } + + /** + * Set time to live for given {@code fields} in milliseconds. + * + * @param key must not be {@literal null}. + * @param millis the amount of time after which the fields will be expired in milliseconds, must not be {@literal null}. * @param fields must not be {@literal null}. * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. 
* @see Redis Documentation: HPEXPIRE - * @since 3.4 + * @since 3.5 */ @Nullable List hpExpire(byte[] key, long millis, byte[]... fields); + /** + * Set time to live for given {@code fields} in milliseconds. + * + * @param key must not be {@literal null}. + * @param ttl the amount of time after which the fields will be expired in {@link Duration#toMillis() milliseconds} precision, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPEXPIRE + * @since 3.5 + */ + @Nullable + default List hpExpire(byte[] key, Duration ttl, byte[]... fields) { + return hpExpire(key, ttl.toMillis(), fields); + } + /** * Set the expiration for given {@code field} as a {@literal UNIX} timestamp. * @@ -295,7 +332,7 @@ public interface RedisHashCommands { * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not * met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. * @see Redis Documentation: HEXPIREAT - * @since 3.4 + * @since 3.5 */ @Nullable List hExpireAt(byte[] key, long unixTime, byte[]... fields); @@ -311,7 +348,7 @@ public interface RedisHashCommands { * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not * met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. 
* @see Redis Documentation: HPEXPIREAT - * @since 3.4 + * @since 3.5 */ @Nullable List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... fields); @@ -325,27 +362,27 @@ public interface RedisHashCommands { * {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field; * {@literal null} when used in pipeline / transaction.{@literal null} when used in pipeline / transaction. * @see Redis Documentation: HPERSIST - * @since 3.4 + * @since 3.5 */ @Nullable List hPersist(byte[] key, byte[]... fields); /** - * Get the time to live for {@code field} in seconds. + * Get the time to live for {@code fields} in seconds. * * @param key must not be {@literal null}. * @param fields must not be {@literal null}. * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative value - * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. - * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * to signal an error. The command returns {@code -1} if the field exists but has no associated expiration time. + * The command returns {@code -2} if the field does not exist; {@literal null} when used in pipeline / transaction. * @see Redis Documentation: HTTL - * @since 3.4 + * @since 3.5 */ @Nullable List hTtl(byte[] key, byte[]... fields); /** - * Get the time to live for {@code field} in and convert it to the given {@link TimeUnit}. + * Get the time to live for {@code fields} in and convert it to the given {@link TimeUnit}. * * @param key must not be {@literal null}. * @param timeUnit must not be {@literal null}. @@ -354,8 +391,24 @@ public interface RedisHashCommands { * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. 
* The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. * @see Redis Documentation: HTTL - * @since 3.4 + * @since 3.5 */ @Nullable + // TODO: this is complete nonsense as it would jeopardize negative values + // TODO: this should be a List> List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields); + + /** + * Get the time to live for {@code fields} in milliseconds. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + List hpTtl(byte[] key, byte[]... fields); } diff --git a/src/main/java/org/springframework/data/redis/connection/RedisKeyCommands.java b/src/main/java/org/springframework/data/redis/connection/RedisKeyCommands.java index 414f178d92..49326637d3 100644 --- a/src/main/java/org/springframework/data/redis/connection/RedisKeyCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/RedisKeyCommands.java @@ -16,6 +16,7 @@ package org.springframework.data.redis.connection; import java.time.Duration; +import java.time.Instant; import java.util.List; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -191,6 +192,20 @@ default Cursor scan(KeyScanOptions options) { @Nullable Boolean expire(byte[] key, long seconds); + /** + * Set time to live for given {@code key} using {@link Duration#toSeconds() seconds} precision. + * + * @param key must not be {@literal null}. + * @param duration + * @return {@literal null} when used in pipeline / transaction. 
+ * @see Redis Documentation: EXPIRE + * @since 3.5 + */ + @Nullable + default Boolean expire(byte[] key, Duration duration) { + return expire(key, duration.toSeconds()); + } + /** * Set time to live for given {@code key} in milliseconds. * @@ -202,6 +217,20 @@ default Cursor scan(KeyScanOptions options) { @Nullable Boolean pExpire(byte[] key, long millis); + /** + * Set time to live for given {@code key} using {@link Duration#toMillis() milliseconds} precision. + * + * @param key must not be {@literal null}. + * @param duration + * @return {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: PEXPIRE + * @since 3.5 + */ + @Nullable + default Boolean pExpire(byte[] key, Duration duration) { + return pExpire(key, duration.toMillis()); + } + /** * Set the expiration for given {@code key} as a {@literal UNIX} timestamp. * @@ -213,6 +242,21 @@ default Cursor scan(KeyScanOptions options) { @Nullable Boolean expireAt(byte[] key, long unixTime); + /** + * Set the expiration for given {@code key} as a {@literal UNIX} timestamp in {@link Instant#getEpochSecond() seconds} + * precision. + * + * @param key must not be {@literal null}. + * @param unixTime + * @return {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: EXPIREAT + * @since 3.5 + */ + @Nullable + default Boolean expireAt(byte[] key, Instant unixTime) { + return expireAt(key, unixTime.getEpochSecond()); + } + /** * Set the expiration for given {@code key} as a {@literal UNIX} timestamp in milliseconds. * @@ -224,6 +268,21 @@ default Cursor scan(KeyScanOptions options) { @Nullable Boolean pExpireAt(byte[] key, long unixTimeInMillis); + /** + * Set the expiration for given {@code key} as a {@literal UNIX} timestamp in {@link Instant#toEpochMilli() + * milliseconds} precision. + * + * @param key must not be {@literal null}. + * @param unixTime + * @return {@literal null} when used in pipeline / transaction. 
+ * @see Redis Documentation: PEXPIREAT + * @since 3.5 + */ + @Nullable + default Boolean pExpireAt(byte[] key, Instant unixTime) { + return pExpireAt(key, unixTime.toEpochMilli()); + } + /** * Remove the expiration from given {@code key}. * diff --git a/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java b/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java index f95b618cfd..ed0101641e 100644 --- a/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java +++ b/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java @@ -2333,6 +2333,7 @@ Long zRangeStoreRevByScore(String dstKey, String srcKey, @Nullable Long hStrLen(String key, String field); + // TODO: why why whay is this such a shitty api that there's missing all the NX, XX, GT Options /** * Set time to live for given {@code field} in seconds. * @@ -2412,7 +2413,7 @@ Long zRangeStoreRevByScore(String dstKey, String srcKey, List hPersist(String key, String... fields); /** - * Get the time to live for {@code field} in seconds. + * Get the time to live for {@code fields} in seconds. * * @param key must not be {@literal null}. * @param fields must not be {@literal null}. @@ -2420,13 +2421,13 @@ Long zRangeStoreRevByScore(String dstKey, String srcKey, * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. * @see Redis Documentation: HTTL - * @since 3.4 + * @since 3.5 */ @Nullable List hTtl(String key, String... fields); /** - * Get the time to live for {@code field} in and convert it to the given {@link TimeUnit}. + * Get the time to live for {@code fields} in and convert it to the given {@link TimeUnit}. * * @param key must not be {@literal null}. * @param timeUnit must not be {@literal null}. 
@@ -2435,11 +2436,25 @@ Long zRangeStoreRevByScore(String dstKey, String srcKey, * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. * @see Redis Documentation: HTTL - * @since 3.4 + * @since 3.5 */ @Nullable List hTtl(String key, TimeUnit timeUnit, String... fields); + /** + * Get the time to live for {@code fields} in seconds. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in milliseconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + List hpTtl(String key, String... fields); + // ------------------------------------------------------------------------- // Methods dealing with HyperLogLog // ------------------------------------------------------------------------- diff --git a/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java b/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java index c436afaeef..9a6815460b 100644 --- a/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java @@ -375,6 +375,18 @@ public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) { } } + @Override + public List hpTtl(byte[] key, byte[]... 
fields) { + Assert.notNull(key, "Key must not be null"); + Assert.notNull(fields, "Fields must not be null"); + + try { + return connection.getCluster().hpttl(key, fields); + } catch (Exception ex) { + throw convertJedisAccessException(ex); + } + } + @Nullable @Override public Long hStrLen(byte[] key, byte[] field) { diff --git a/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java b/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java index 887412bb09..069324c9db 100644 --- a/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java @@ -288,6 +288,11 @@ public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) { .toList(Converters.secondsToTimeUnit(timeUnit)); } + @Override + public List hpTtl(byte[] key, byte[]... fields) { + return connection.invoke().just(Jedis::hpttl, PipelineBinaryCommands::hpttl, key, fields); + } + @Nullable @Override public Long hStrLen(byte[] key, byte[] field) { diff --git a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java index 01e683daa4..61c6b8501a 100644 --- a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java @@ -246,6 +246,12 @@ public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) { .toList(Converters.secondsToTimeUnit(timeUnit)); } + @Override + public List hpTtl(byte[] key, byte[]... 
fields) { + return connection.invoke().fromMany(RedisHashAsyncCommands::hpttl, key, fields) + .toList(); + } + /** * @param key * @param cursorId diff --git a/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java b/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java index ff9e5b1300..48079707b7 100644 --- a/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java @@ -171,7 +171,7 @@ public interface BoundHashOperations extends BoundKeyOperations { * @since 3.5 */ @Nullable - List expire(Duration timeout, Collection hashKeys); + ExpireChanges expire(Duration timeout, Collection hashKeys); /** * Set the expiration for given {@code hashKey} (aka field) as a {@literal date} timestamp. @@ -187,7 +187,7 @@ public interface BoundHashOperations extends BoundKeyOperations { * @since 3.5 */ @Nullable - List expireAt(Instant expireAt, Collection hashKeys); + ExpireChanges expireAt(Instant expireAt, Collection hashKeys); /** * Remove the expiration from given {@code hashKey} (aka field). @@ -200,7 +200,7 @@ public interface BoundHashOperations extends BoundKeyOperations { * @since 3.5 */ @Nullable - List persist(Collection hashKeys); + ExpireChanges persist(Collection hashKeys); /** * Get the time to live for {@code hashKey} (aka field) in seconds. @@ -213,7 +213,7 @@ public interface BoundHashOperations extends BoundKeyOperations { * @since 3.5 */ @Nullable - List getExpire(Collection hashKeys); + Expirations getExpire(Collection hashKeys); /** * Get the time to live for {@code hashKey} (aka field) and convert it to the given {@link TimeUnit}. @@ -227,7 +227,7 @@ public interface BoundHashOperations extends BoundKeyOperations { * @since 3.5 */ @Nullable - List getExpire(TimeUnit timeUnit, Collection hashKeys); + Expirations getExpire(TimeUnit timeUnit, Collection hashKeys); /** * Get size of hash at the bound key. 
diff --git a/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java b/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java index 5df4422e48..88c76c529d 100644 --- a/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java @@ -28,6 +28,7 @@ import org.springframework.core.convert.converter.Converter; import org.springframework.data.redis.connection.convert.Converters; +import org.springframework.data.redis.core.Expirations.Timeouts; import org.springframework.lang.Nullable; import org.springframework.util.Assert; @@ -215,44 +216,76 @@ public Boolean putIfAbsent(K key, HK hashKey, HV value) { } @Override - public List expire(K key, Duration duration, Collection hashKeys) { - byte[] rawKey = rawKey(key); - byte[][] rawHashKeys = rawHashKeys(hashKeys.toArray()); - long rawTimeout = duration.toMillis(); + public ExpireChanges expire(K key, Duration duration, Collection hashKeys) { - return execute(connection -> connection.hpExpire(rawKey, rawTimeout, rawHashKeys)); - } + List orderedKeys = List.copyOf(hashKeys); - @Override - public List expireAt(K key, Instant instant, Collection hashKeys) { byte[] rawKey = rawKey(key); - byte[][] rawHashKeys = rawHashKeys(hashKeys.toArray()); + byte[][] rawHashKeys = rawHashKeys(orderedKeys.toArray()); + boolean splitSecond = TimeoutUtils.hasMillis(duration); + + List raw = execute(connection -> { + if (splitSecond) { + return connection.hashCommands().hpExpire(rawKey, duration.toMillis(), rawHashKeys); + } + return connection.hashCommands().hExpire(rawKey, TimeoutUtils.toSeconds(duration), rawHashKeys); + }); - return execute(connection -> connection.hpExpireAt(rawKey, instant.toEpochMilli(), rawHashKeys)); + return raw != null ? 
ExpireChanges.of(orderedKeys, raw) : null; } @Override - public List persist(K key, Collection hashKeys) { + public ExpireChanges expireAt(K key, Instant instant, Collection hashKeys) { + + List orderedKeys = List.copyOf(hashKeys); + byte[] rawKey = rawKey(key); - byte[][] rawHashKeys = rawHashKeys(hashKeys.toArray()); + byte[][] rawHashKeys = rawHashKeys(orderedKeys.toArray()); + + Long millis = instant.toEpochMilli(); + + List raw = execute(connection -> TimeoutUtils.containsSplitSecond(millis) + ? connection.hashCommands().hpExpireAt(rawKey, millis, rawHashKeys) + : connection.hashCommands().hExpireAt(rawKey, instant.getEpochSecond(), rawHashKeys)); - return execute(connection -> connection.hPersist(rawKey, rawHashKeys)); + return raw != null ? ExpireChanges.of(orderedKeys, raw) : null; } @Override - public List getExpire(K key, Collection hashKeys) { + public ExpireChanges persist(K key, Collection hashKeys) { + + List orderedKeys = List.copyOf(hashKeys); + byte[] rawKey = rawKey(key); - byte[][] rawHashKeys = rawHashKeys(hashKeys.toArray()); + byte[][] rawHashKeys = rawHashKeys(orderedKeys.toArray()); - return execute(connection -> connection.hTtl(rawKey, rawHashKeys)); + List raw = execute(connection -> connection.hashCommands().hPersist(rawKey, rawHashKeys)); + + return raw != null ? ExpireChanges.of(orderedKeys, raw) : null; } @Override - public List getExpire(K key, TimeUnit timeUnit, Collection hashKeys) { + public Expirations getExpire(K key, TimeUnit timeUnit, Collection hashKeys) { + + if(timeUnit.compareTo(TimeUnit.MILLISECONDS) < 0) { + throw new IllegalArgumentException("%s precision is not supported must be >= MILLISECONDS".formatted(timeUnit)); + } + + List orderedKeys = List.copyOf(hashKeys); + byte[] rawKey = rawKey(key); - byte[][] rawHashKeys = rawHashKeys(hashKeys.toArray()); + byte[][] rawHashKeys = rawHashKeys(orderedKeys.toArray()); + + List raw = execute( + connection -> TimeUnit.MILLISECONDS.equals(timeUnit) ? 
connection.hashCommands().hpTtl(rawKey, rawHashKeys) + : connection.hashCommands().hTtl(rawKey, timeUnit, rawHashKeys)); + + if (raw == null) { + return null; + } - return execute(connection -> connection.hTtl(rawKey, timeUnit, rawHashKeys)); + Timeouts timeouts = new Timeouts(TimeUnit.MILLISECONDS.equals(timeUnit) ? timeUnit : TimeUnit.SECONDS, raw); + return Expirations.of(timeUnit, orderedKeys, timeouts); } @Override diff --git a/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java b/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java index c3e004c25d..070127c235 100644 --- a/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java @@ -19,16 +19,20 @@ import reactor.core.publisher.Mono; import java.nio.ByteBuffer; +import java.time.Duration; +import java.time.Instant; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; import java.util.function.Function; import org.reactivestreams.Publisher; import org.springframework.dao.InvalidDataAccessApiUsageException; import org.springframework.data.redis.connection.ReactiveHashCommands; import org.springframework.data.redis.connection.convert.Converters; +import org.springframework.data.redis.core.Expirations.Timeouts; import org.springframework.data.redis.serializer.RedisSerializationContext; import org.springframework.lang.Nullable; import org.springframework.util.Assert; @@ -63,8 +67,7 @@ public Mono remove(H key, Object... 
hashKeys) { Assert.noNullElements(hashKeys, "Hash keys must not contain null elements"); return createMono(hashCommands -> Flux.fromArray(hashKeys) // - .map(hashKey -> (HK) hashKey) - .map(this::rawHashKey) // + .map(hashKey -> (HK) hashKey).map(this::rawHashKey) // .collectList() // .flatMap(hks -> hashCommands.hDel(rawKey(key), hks))); } @@ -86,8 +89,8 @@ public Mono get(H key, Object hashKey) { Assert.notNull(key, "Key must not be null"); Assert.notNull(hashKey, "Hash key must not be null"); - return createMono(hashCommands -> hashCommands.hGet(rawKey(key), rawHashKey((HK) hashKey)) - .map(this::readHashValue)); + return createMono( + hashCommands -> hashCommands.hGet(rawKey(key), rawHashKey((HK) hashKey)).map(this::readHashValue)); } @Override @@ -109,8 +112,8 @@ public Mono increment(H key, HK hashKey, long delta) { Assert.notNull(key, "Key must not be null"); Assert.notNull(hashKey, "Hash key must not be null"); - return template.doCreateMono(connection -> connection.numberCommands() - .hIncrBy(rawKey(key), rawHashKey(hashKey), delta)); + return template + .doCreateMono(connection -> connection.numberCommands().hIncrBy(rawKey(key), rawHashKey(hashKey), delta)); } @Override @@ -119,8 +122,8 @@ public Mono increment(H key, HK hashKey, double delta) { Assert.notNull(key, "Key must not be null"); Assert.notNull(hashKey, "Hash key must not be null"); - return template.doCreateMono(connection -> connection.numberCommands() - .hIncrBy(rawKey(key), rawHashKey(hashKey), delta)); + return template + .doCreateMono(connection -> connection.numberCommands().hIncrBy(rawKey(key), rawHashKey(hashKey), delta)); } @Override @@ -137,8 +140,7 @@ public Mono> randomEntry(H key) { Assert.notNull(key, "Key must not be null"); - return createMono(hashCommands -> hashCommands.hRandFieldWithValues(rawKey(key))) - .map(this::deserializeHashEntry); + return createMono(hashCommands -> hashCommands.hRandFieldWithValues(rawKey(key))).map(this::deserializeHashEntry); } @Override @@ -235,6 
+237,78 @@ public Flux> scan(H key, ScanOptions options) { .map(this::deserializeHashEntry)); } + @Override + public Mono> expire(H key, Duration timeout, Collection hashKeys) { + + List orderedKeys = List.copyOf(hashKeys); + ByteBuffer rawKey = rawKey(key); + List rawHashKeys = orderedKeys.stream().map(this::rawHashKey).toList(); + + Mono> raw = createFlux(connection -> { + + if (TimeoutUtils.hasMillis(timeout)) { + return connection.hpExpire(rawKey, timeout, rawHashKeys); + } + + return connection.hExpire(rawKey, timeout, rawHashKeys); + }).collectList(); + + return raw.map(values -> ExpireChanges.of(orderedKeys, values)); + } + + @Nullable + @Override + public Mono> expireAt(H key, Instant expireAt, Collection hashKeys) { + + List orderedKeys = List.copyOf(hashKeys); + ByteBuffer rawKey = rawKey(key); + List rawHashKeys = orderedKeys.stream().map(this::rawHashKey).toList(); + + Mono> raw = createFlux(connection -> connection.hExpireAt(rawKey, expireAt, rawHashKeys)).collectList(); + + return raw.map(values -> ExpireChanges.of(orderedKeys, values)); + } + + @Nullable + @Override + public Mono> persist(H key, Collection hashKeys) { + + List orderedKeys = List.copyOf(hashKeys); + ByteBuffer rawKey = rawKey(key); + List rawHashKeys = orderedKeys.stream().map(this::rawHashKey).toList(); + + Mono> raw = createFlux(connection -> connection.hPersist(rawKey, rawHashKeys)).collectList(); + + return raw.map(values -> ExpireChanges.of(orderedKeys, values)); + } + + @Nullable + @Override + public Mono> getExpire(H key, TimeUnit timeUnit, Collection hashKeys) { + + if (timeUnit.compareTo(TimeUnit.MILLISECONDS) < 0) { + throw new IllegalArgumentException("%s precision is not supported must be >= MILLISECONDS".formatted(timeUnit)); + } + + List orderedKeys = List.copyOf(hashKeys); + ByteBuffer rawKey = rawKey(key); + List rawHashKeys = orderedKeys.stream().map(this::rawHashKey).toList(); + + Mono> raw = createFlux(connection -> { + + if (TimeUnit.MILLISECONDS.equals(timeUnit)) 
{ + return connection.hpTtl(rawKey, rawHashKeys); + } + return connection.hTtl(rawKey, rawHashKeys); + }).collectList(); + + return raw.map(values -> { + + Timeouts timeouts = new Timeouts(TimeUnit.MILLISECONDS.equals(timeUnit) ? timeUnit : TimeUnit.SECONDS, values); + return Expirations.of(timeUnit, orderedKeys, timeouts); + }); + } + @Override public Mono delete(H key) { diff --git a/src/main/java/org/springframework/data/redis/core/Expirations.java b/src/main/java/org/springframework/data/redis/core/Expirations.java new file mode 100644 index 0000000000..a2475e4fa0 --- /dev/null +++ b/src/main/java/org/springframework/data/redis/core/Expirations.java @@ -0,0 +1,303 @@ +/* + * Copyright 2025 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.redis.core; + +import java.time.Duration; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import org.springframework.lang.Nullable; +import org.springframework.util.CollectionUtils; +import org.springframework.util.ObjectUtils; + +/** + * Value Object linking a number of keys to their {@link Expiration} retaining the order of the original source. + * Dedicated higher level methods interpret raw expiration values retrieved from a Redis Client. + *
+ * <ul>
+ * <li>{@link #persistent()} returns keys that do not have an associated time to live</li>
+ * <li>{@link #missing()} returns keys that do not exist and therefore have no associated time to live</li>
+ * <li>{@link #expirations()} returns the ordered list of {@link Expiration expirations} based on the raw values</li>
+ * <li>{@link #expiring()} returns the expiring keys along with their {@link Duration time to live}</li>
+ * </ul>
+ * + * @author Christoph Strobl + * @since 3.5 + */ +public class Expirations { + + private final TimeUnit unit; + private final Map expirations; + + Expirations(TimeUnit unit, Map expirations) { + this.unit = unit; + this.expirations = expirations; + } + + /** + * Factory Method to create {@link Expirations} from raw sources provided in a given {@link TimeUnit}. + * + * @param targetUnit the actual time unit of the raw timeToLive values. + * @param keys the keys to associated with the raw values in timeToLive. Defines the actual order of entries within + * {@link Expirations}. + * @param timeouts the raw Redis time to live values. + * @return new instance of {@link Expirations}. + * @param the key type used + */ + public static Expirations of(TimeUnit targetUnit, List keys, Timeouts timeouts) { + + if (keys.size() != timeouts.size()) { + throw new IllegalArgumentException( + "Keys and Timeouts must be of same size but was %s vs %s".formatted(keys.size(), timeouts.size())); + } + if (keys.size() == 1) { + return new Expirations<>(targetUnit, + Map.of(keys.iterator().next(), Expiration.of(timeouts.raw().iterator().next(), timeouts.timeUnit()))); + } + + Map target = CollectionUtils.newLinkedHashMap(keys.size()); + for (int i = 0; i < keys.size(); i++) { + target.put(keys.get(i), Expiration.of(timeouts.get(i), timeouts.timeUnit())); + } + return new Expirations<>(targetUnit, target); + } + + /** + * @return an ordered set of keys that do not have a time to live. + */ + public Set persistent() { + return filterByState(Expiration.PERSISTENT); + } + + /** + * @return an ordered set of keys that do not exists and therefore do not have a time to live. + */ + public Set missing() { + return filterByState(Expiration.MISSING); + } + + /** + * @return an ordered set of all {@link Expirations expirations} where the {@link Expiration#value()} is using the + * {@link TimeUnit} defined in {@link #precision()}. 
+ */ + public List expirations() { + return expirations.values().stream().map(it -> it.convert(this.unit)).toList(); + } + + /** + * @return the {@link TimeUnit} for {@link Expiration expirations} held by this instance. + */ + public TimeUnit precision() { + return unit; + } + + /** + * @return an ordered {@link List} of {@link java.util.Map.Entry entries} combining keys with their actual time to + * live. {@link Expiration#isMissing() Missing} and {@link Expiration#isPersistent() persistent} entries are + * skipped. + */ + public List> expiring() { + return expirations.entrySet().stream().filter(it -> !it.getValue().isMissing() && !it.getValue().isPersistent()) + .map(it -> Map.entry(it.getKey(), toDuration(it.getValue()))).toList(); + } + + /** + * @param key + * @return the {@link Expirations expirations} where the {@link Expiration#value()} is using the {@link TimeUnit} + * defined in {@link #precision()} or {@literal null} if no entry could be found. + */ + @Nullable + public Expiration expirationOf(K key) { + + Expiration expiration = expirations.get(key); + if (expiration == null) { + return null; + } + + return expiration.convert(this.unit); + } + + /** + * @param key + * @return the time to live value of the requested key if it exists and the expiration is neither + * {@link Expiration#isMissing() missing} nor {@link Expiration#isPersistent() persistent}, {@literal null} + * otherwise. 
+ */ + @Nullable + public Duration ttlOf(K key) { + + Expiration expiration = expirationOf(key); + if (expiration == null) { + return null; + } + return toDuration(expiration); + } + + private Set filterByState(Expiration filter) { + return expirations.entrySet().stream().filter(entry -> entry.getValue().equals(filter)).map(Map.Entry::getKey) + .collect(Collectors.toCollection(LinkedHashSet::new)); + } + + @Nullable + static Duration toDuration(Expiration expiration) { + + if (expiration.sourceUnit == null) { + return null; + } + return Duration.of(expiration.raw(), expiration.sourceUnit.toChronoUnit()); + } + + public record Timeouts(TimeUnit timeUnit, List raw) { + + Long get(int index) { + return raw.get(index); + } + + public int size() { + return raw.size(); + } + } + + /** + * Expiration holds time to live {@link #raw()} values as returned by a Redis Client. {@link #value()} serves the + * actual timeout in the given temporal context converting the {@link #raw()} value into a target {@link TimeUnit}. + * Dedicated methods such as {@link #isPersistent()} allow interpretation of the raw result. {@link #MISSING} and + * {@link #PERSISTENT} mark predefined states returned by Redis indicating a time to live value could not be retrieved + * due to various reasons. + */ + public static class Expiration { + + private final long raw; + @Nullable TimeUnit sourceUnit; + @Nullable TimeUnit targetUnit; + + public Expiration(long value) { + this(value, null); + } + + public Expiration(long value, @Nullable TimeUnit sourceUnit) { + this(value, sourceUnit, null); + } + + public Expiration(long value, @Nullable TimeUnit sourceUnit, @Nullable TimeUnit targetUnit) { + this.raw = value; + this.sourceUnit = sourceUnit; + this.targetUnit = targetUnit; + } + + /** + * The raw source value as returned by the Redis Client. 
+ * + * @return the raw data + */ + public long raw() { + return raw; + } + + /** + * @return the {@link #raw()} value converted into the {@link #convert(TimeUnit) requested} target {@link TimeUnit}. + */ + public long value() { + + if (sourceUnit == null || targetUnit == null) { + return raw; + } + return targetUnit.convert(raw, sourceUnit); + } + + /** + * @param timeUnit must not be {@literal null}. + * @return the {@link Expiration} instance with new target {@link TimeUnit} set for obtaining the {@link #value() + * value}, or the same instance raw value cannot or must not be converted. + */ + public Expiration convert(TimeUnit timeUnit) { + + if (sourceUnit == null || ObjectUtils.nullSafeEquals(sourceUnit, timeUnit)) { + return this; + } + return new Expiration(raw, sourceUnit, timeUnit); + } + + /** + * Predefined {@link Expiration} for a key that does not exists and therefore does not have a time to live. + */ + public static Expiration MISSING = new Expiration(-2L); + + /** + * Predefined {@link Expiration} for a key that exists but does not expire. + */ + public static Expiration PERSISTENT = new Expiration(-1L); + + /** + * @return {@literal true} if key exists but does not expire. + */ + public boolean isPersistent() { + return PERSISTENT.equals(this); + } + + /** + * @return {@literal true} if key does not exists and therefore does not have a time to live. + */ + public boolean isMissing() { + return MISSING.equals(this); + } + + /** + * Factory method for creating {@link Expiration} instances, returning predefined ones if the value matches a known + * reserved state. + * + * @return the {@link Expiration} for the given raw value. 
+ */ + static Expiration of(Number value, TimeUnit timeUnit) { + return switch (value.intValue()) { + case -2 -> MISSING; + case -1 -> PERSISTENT; + default -> new Expiration(value.longValue(), timeUnit); + }; + } + + @Override + public boolean equals(Object o) { + + if (o == this) { + return true; + } + + if (!(o instanceof Expiration that)) { + return false; + } + + if (!ObjectUtils.nullSafeEquals(this.sourceUnit, that.sourceUnit)) { + return false; + } + + if (!ObjectUtils.nullSafeEquals(this.targetUnit, that.targetUnit)) { + return false; + } + + return this.raw == that.raw; + } + + @Override + public int hashCode() { + return Objects.hash(raw); + } + } +} diff --git a/src/main/java/org/springframework/data/redis/core/ExpireChanges.java b/src/main/java/org/springframework/data/redis/core/ExpireChanges.java new file mode 100644 index 0000000000..b9486f639d --- /dev/null +++ b/src/main/java/org/springframework/data/redis/core/ExpireChanges.java @@ -0,0 +1,195 @@ +/* + * Copyright 2025 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.redis.core; + +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +import org.springframework.util.CollectionUtils; + +/** + * Value Object linking a number of keys to their {@link ExpiryChangeState} retaining the order of the original source. + * Dedicated higher level methods interpret raw values retrieved from a Redis Client. + *
+ * <ul>
+ * <li>{@link #ok()} returns keys for which the time to live has been set</li>
+ * <li>{@link #expired()} returns keys that have been expired</li>
+ * <li>{@link #missed()} returns keys for which the time to live could not be set because they do not exist</li>
+ * <li>{@link #skipped()} returns keys for which the time to live has not been set because a precondition was not met</li>
+ * </ul>
+ * + * @author Christoph Strobl + * @since 3.5 + */ +public class ExpireChanges { + + private final Map changes; + + ExpireChanges(Map changes) { + this.changes = changes; + } + + /** + * Factory Method to create {@link ExpireChanges} from raw sources. + * + * @param keys the keys to associated with the raw values in states. Defines the actual order of entries within + * {@link ExpireChanges}. + * @param states the raw Redis state change values. + * @return new instance of {@link ExpireChanges}. + * @param the key type used + */ + public static ExpireChanges of(List keys, List states) { + + if (keys.size() == 1) { + return new ExpireChanges<>(Map.of(keys.iterator().next(), stateFromValue(states.iterator().next()))); + } + + Map target = CollectionUtils.newLinkedHashMap(keys.size()); + for (int i = 0; i < keys.size(); i++) { + target.put(keys.get(i), stateFromValue(states.get(i))); + } + return new ExpireChanges<>(target); + } + + /** + * @return an ordered {@link List} of the status changes. + */ + public List stateChanges() { + return List.copyOf(changes.values()); + } + + /** + * @return the status change for the given {@literal key}, or {@literal null} if {@link ExpiryChangeState} does not + * contain an entry for it. + */ + public ExpiryChangeState stateOf(K key) { + return changes.get(key); + } + + /** + * @return {@literal true} if all changes are {@link ExpiryChangeState#OK}. + */ + public boolean allOk() { + return allMach(ExpiryChangeState.OK::equals); + } + + /** + * @return {@literal true} if all changes are either ok {@link ExpiryChangeState#OK} or + * {@link ExpiryChangeState#EXPIRED}. + */ + public boolean allChanged() { + return allMach(it -> ExpiryChangeState.OK.equals(it) || ExpiryChangeState.EXPIRED.equals(it)); + } + + /** + * @return an ordered list of if all changes are {@link ExpiryChangeState#OK}. 
+ */ + public Set ok() { + return filterByState(ExpiryChangeState.OK); + } + + /** + * @return an ordered list of if all changes are {@link ExpiryChangeState#EXPIRED}. + */ + public Set expired() { + return filterByState(ExpiryChangeState.EXPIRED); + } + + /** + * @return an ordered list of if all changes are {@link ExpiryChangeState#DOES_NOT_EXIST}. + */ + public Set missed() { + return filterByState(ExpiryChangeState.DOES_NOT_EXIST); + } + + /** + * @return an ordered list of if all changes are {@link ExpiryChangeState#CONDITION_NOT_MET}. + */ + public Set skipped() { + return filterByState(ExpiryChangeState.CONDITION_NOT_MET); + } + + public boolean allMach(Predicate predicate) { + return changes.values().stream().allMatch(predicate); + } + + private Set filterByState(ExpiryChangeState filter) { + return changes.entrySet().stream().filter(entry -> entry.getValue().equals(filter)).map(Map.Entry::getKey) + .collect(Collectors.toCollection(LinkedHashSet::new)); + } + + private static ExpiryChangeState stateFromValue(Number value) { + return ExpiryChangeState.of(value); + } + + public record ExpiryChangeState(long value) { + + public static final ExpiryChangeState DOES_NOT_EXIST = new ExpiryChangeState(-2L); + public static final ExpiryChangeState CONDITION_NOT_MET = new ExpiryChangeState(0L); + public static final ExpiryChangeState OK = new ExpiryChangeState(1L); + public static final ExpiryChangeState EXPIRED = new ExpiryChangeState(2L); + + static ExpiryChangeState of(Number value) { + return switch (value.intValue()) { + case -2 -> DOES_NOT_EXIST; + case 0 -> CONDITION_NOT_MET; + case 1 -> OK; + case 2 -> EXPIRED; + default -> new ExpiryChangeState(value.longValue()); + }; + } + + public boolean isOk() { + return OK.equals(this); + } + + public boolean isExpired() { + return EXPIRED.equals(this); + } + + public boolean isMissing() { + return DOES_NOT_EXIST.equals(this); + } + + public boolean isSkipped() { + return CONDITION_NOT_MET.equals(this); + } + + 
@Override + public boolean equals(Object o) { + + if (o == this) { + return true; + } + + if (!(o instanceof ExpiryChangeState that)) { + return false; + } + + return this.value == that.value; + } + + @Override + public int hashCode() { + return Objects.hash(value); + } + } +} diff --git a/src/main/java/org/springframework/data/redis/core/HashOperations.java b/src/main/java/org/springframework/data/redis/core/HashOperations.java index ea17d26e2d..26767be47f 100644 --- a/src/main/java/org/springframework/data/redis/core/HashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/HashOperations.java @@ -240,7 +240,7 @@ public interface HashOperations { * @since 3.5 */ @Nullable - List expire(H key, Duration timeout, Collection hashKeys); + ExpireChanges expire(H key, Duration timeout, Collection hashKeys); /** * Set the expiration for given {@code hashKey} (aka field) as a {@literal date} timestamp. @@ -257,7 +257,7 @@ public interface HashOperations { * @since 3.5 */ @Nullable - List expireAt(H key, Instant expireAt, Collection hashKeys); + ExpireChanges expireAt(H key, Instant expireAt, Collection hashKeys); /** * Remove the expiration from given {@code hashKey} (aka field). @@ -271,7 +271,7 @@ public interface HashOperations { * @since 3.5 */ @Nullable - List persist(H key, Collection hashKeys); + ExpireChanges persist(H key, Collection hashKeys); /** * Get the time to live for {@code hashKey} (aka field) in seconds. @@ -285,7 +285,9 @@ public interface HashOperations { * @since 3.5 */ @Nullable - List getExpire(H key, Collection hashKeys); + default Expirations getExpire(H key, Collection hashKeys) { + return getExpire(key, TimeUnit.SECONDS, hashKeys); + } /** * Get the time to live for {@code hashKey} (aka field) and convert it to the given {@link TimeUnit}. 
@@ -300,7 +302,8 @@ public interface HashOperations { * @since 3.5 */ @Nullable - List getExpire(H key, TimeUnit timeUnit, Collection hashKeys); + Expirations getExpire(H key, TimeUnit timeUnit, Collection hashKeys); + /** * @return never {@literal null}. */ diff --git a/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java b/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java index 2151590ecc..5bac3e9d9e 100644 --- a/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java @@ -19,9 +19,14 @@ import reactor.core.publisher.Mono; import java.nio.ByteBuffer; +import java.time.Duration; +import java.time.Instant; import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; + +import org.springframework.lang.Nullable; /** * Reactive Redis operations for Hash Commands. @@ -230,10 +235,78 @@ default Flux> scan(H key) { */ Flux> scan(H key, ScanOptions options); + Mono> expire(H key, Duration timeout, Collection hashKeys); + + /** + * Set the expiration for given {@code hashKey} (aka field) as a {@literal date} timestamp. + * + * @param key must not be {@literal null}. + * @param expireAt must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | + * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in + * pipeline / transaction. + * @throws IllegalArgumentException if the instant is {@literal null} or too large to represent as a {@code Date}. 
+ * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + Mono> expireAt(H key, Instant expireAt, Collection hashKeys); + + /** + * Remove the expiration from given {@code hashKey} (aka field). + * + * @param key must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is + * removed; {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such + * field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPERSIST + * @since 3.5 + */ + @Nullable + Mono> persist(H key, Collection hashKeys); + + /** + * Get the time to live for {@code hashKey} (aka field) in seconds. + * + * @param key must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative + * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration + * time. The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / + * transaction. + * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + default Mono> getExpire(H key, Collection hashKeys) { + return getExpire(key, TimeUnit.SECONDS, hashKeys); + } + + /** + * Get the time to live for {@code hashKey} (aka field) and convert it to the given {@link TimeUnit}. + * + * @param key must not be {@literal null}. + * @param timeUnit must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative + * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration + * time. 
The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / + * transaction. + * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + Mono> getExpire(H key, TimeUnit timeUnit, Collection hashKeys); + /** * Removes the given {@literal key}. * * @param key must not be {@literal null}. */ Mono delete(H key); + } diff --git a/src/main/java/org/springframework/data/redis/core/TimeoutUtils.java b/src/main/java/org/springframework/data/redis/core/TimeoutUtils.java index 86d90ca882..c46e8478d6 100644 --- a/src/main/java/org/springframework/data/redis/core/TimeoutUtils.java +++ b/src/main/java/org/springframework/data/redis/core/TimeoutUtils.java @@ -35,7 +35,11 @@ public abstract class TimeoutUtils { * @since 2.1 */ public static boolean hasMillis(Duration duration) { - return duration.toMillis() % 1000 != 0; + return containsSplitSecond(duration.toMillis()); + } + + public static boolean containsSplitSecond(long millis) { + return millis % 1000 != 0; } /** diff --git a/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java b/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java index 547c351875..ad22195ad0 100644 --- a/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java +++ b/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java @@ -20,13 +20,14 @@ import java.util.Collection; import java.util.Collections; import java.util.Date; -import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.TimeUnit; import org.springframework.data.redis.connection.DataType; +import org.springframework.data.redis.core.Expirations; +import org.springframework.data.redis.core.ExpireChanges; import org.springframework.data.redis.core.BoundHashOperations; import org.springframework.data.redis.core.Cursor; import 
org.springframework.data.redis.core.RedisOperations; @@ -327,27 +328,27 @@ public Cursor> scan() { } @Override - public List expire(Duration timeout, Collection hashKeys) { + public ExpireChanges expire(Duration timeout, Collection hashKeys) { return Objects.requireNonNull(hashOps.expire(timeout, hashKeys)); } @Override - public List expireAt(Instant expireAt, Collection hashKeys) { + public ExpireChanges expireAt(Instant expireAt, Collection hashKeys) { return Objects.requireNonNull(hashOps.expireAt(expireAt, hashKeys)); } @Override - public List persist(Collection hashKeys) { + public ExpireChanges persist(Collection hashKeys) { return Objects.requireNonNull(hashOps.persist(hashKeys)); } @Override - public List getExpire(Collection hashKeys) { + public Expirations getExpire(Collection hashKeys) { return Objects.requireNonNull(hashOps.getExpire(hashKeys)); } @Override - public List getExpire(TimeUnit timeUnit, Collection hashKeys) { + public Expirations getExpire(TimeUnit timeUnit, Collection hashKeys) { return Objects.requireNonNull(hashOps.getExpire(timeUnit, hashKeys)); } diff --git a/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java b/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java index 4b79cf0290..54d002d549 100644 --- a/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java +++ b/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java @@ -19,11 +19,12 @@ import java.time.Instant; import java.util.Collection; import java.util.Iterator; -import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; +import org.springframework.data.redis.core.Expirations; +import org.springframework.data.redis.core.ExpireChanges; import org.springframework.lang.Nullable; /** @@ -91,7 +92,7 @@ public interface RedisMap extends RedisStore, ConcurrentMap { * @see Redis Documentation: HEXPIRE * @since 3.5 */ - 
List expire(Duration timeout, Collection hashKeys); + ExpireChanges expire(Duration timeout, Collection hashKeys); /** * Set the expiration for given hash {@code key} as a {@literal date} timestamp. @@ -106,7 +107,7 @@ public interface RedisMap extends RedisStore, ConcurrentMap { * @see Redis Documentation: HEXPIRE * @since 3.5 */ - List expireAt(Instant expireAt, Collection hashKeys); + ExpireChanges expireAt(Instant expireAt, Collection hashKeys); /** * Remove the expiration from given hash {@code key}. @@ -118,7 +119,7 @@ public interface RedisMap extends RedisStore, ConcurrentMap { * @see Redis Documentation: HPERSIST * @since 3.5 */ - List persist(Collection hashKeys); + ExpireChanges persist(Collection hashKeys); /** * Get the time to live for hash {@code key} in seconds. @@ -130,7 +131,7 @@ public interface RedisMap extends RedisStore, ConcurrentMap { * @see Redis Documentation: HTTL * @since 3.5 */ - List getExpire(Collection hashKeys); + Expirations getExpire(Collection hashKeys); /** * Get the time to live for hash {@code key} and convert it to the given {@link TimeUnit}. 
@@ -143,5 +144,5 @@ public interface RedisMap extends RedisStore, ConcurrentMap { * @see Redis Documentation: HTTL * @since 3.5 */ - List getExpire(TimeUnit timeUnit, Collection hashKeys); + Expirations getExpire(TimeUnit timeUnit, Collection hashKeys); } diff --git a/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java b/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java index 68981643d9..54d7f0c9d3 100644 --- a/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java +++ b/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java @@ -24,6 +24,8 @@ import java.util.concurrent.TimeUnit; import org.springframework.data.redis.connection.DataType; +import org.springframework.data.redis.core.Expirations; +import org.springframework.data.redis.core.ExpireChanges; import org.springframework.data.redis.core.BoundHashOperations; import org.springframework.data.redis.core.RedisOperations; import org.springframework.lang.Nullable; @@ -300,33 +302,38 @@ public Iterator> scan() { } @Override - public List expire(Duration timeout, Collection hashKeys) { + public ExpireChanges expire(Duration timeout, Collection hashKeys) { + Collection keys = hashKeys.stream().map(key -> (String) key).toList(); - return Objects.requireNonNull(hashOps.expire(timeout, keys)); + return (ExpireChanges) hashOps.expire(timeout, keys); } @Override - public List expireAt(Instant expireAt, Collection hashKeys) { + public ExpireChanges expireAt(Instant expireAt, Collection hashKeys) { + Collection keys = hashKeys.stream().map(key -> (String) key).toList(); - return Objects.requireNonNull(hashOps.expireAt(expireAt, keys)); + return (ExpireChanges) hashOps.expireAt(expireAt, keys); } @Override - public List persist(Collection hashKeys) { + public ExpireChanges persist(Collection hashKeys) { + Collection keys = hashKeys.stream().map(key -> (String) key).toList(); - return 
Objects.requireNonNull(hashOps.persist(keys)); + return (ExpireChanges) hashOps.persist(keys); } @Override - public List getExpire(Collection hashKeys) { + public Expirations getExpire(Collection hashKeys) { + Collection keys = hashKeys.stream().map(key -> (String) key).toList(); - return Objects.requireNonNull(hashOps.getExpire(keys)); + return (Expirations) hashOps.getExpire(keys); } @Override - public List getExpire(TimeUnit timeUnit, Collection hashKeys) { + public Expirations getExpire(TimeUnit timeUnit, Collection hashKeys) { + Collection keys = hashKeys.stream().map(key -> (String) key).toList(); - return Objects.requireNonNull(hashOps.getExpire(timeUnit, keys)); + return (Expirations) hashOps.getExpire(timeUnit, keys); } } diff --git a/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java b/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java index d42153cb68..ffa5bcd100 100644 --- a/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java @@ -3436,6 +3436,7 @@ void hStrLenReturnsZeroWhenKeyDoesNotExist() { @Test @EnabledOnCommand("HEXPIRE") public void hExpireReturnsSuccessAndSetsTTL() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); actual.add(connection.hExpire("hash-hexpire", 5L, "key-2")); actual.add(connection.hTtl("hash-hexpire", "key-2")); @@ -3449,6 +3450,7 @@ public void hExpireReturnsSuccessAndSetsTTL() { @Test @EnabledOnCommand("HEXPIRE") public void hExpireReturnsMinusTwoWhenFieldDoesNotExist() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); actual.add(connection.hExpire("hash-hexpire", 5L, "missking-field")); actual.add(connection.hExpire("missing-key", 5L, "key-2")); @@ -3459,6 +3461,7 @@ public void hExpireReturnsMinusTwoWhenFieldDoesNotExist() { @Test 
@EnabledOnCommand("HEXPIRE") public void hExpireReturnsTwoWhenZeroProvided() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); actual.add(connection.hExpire("hash-hexpire", 0, "key-2")); @@ -3468,9 +3471,10 @@ public void hExpireReturnsTwoWhenZeroProvided() { @Test @EnabledOnCommand("HPEXPIRE") public void hpExpireReturnsSuccessAndSetsTTL() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); actual.add(connection.hpExpire("hash-hexpire", 5000L, "key-2")); - actual.add(connection.hTtl("hash-hexpire", TimeUnit.MILLISECONDS,"key-2")); + actual.add(connection.hpTtl("hash-hexpire", "key-2")); List results = getResults(); assertThat(results.get(0)).isEqualTo(Boolean.TRUE); @@ -3481,6 +3485,7 @@ public void hpExpireReturnsSuccessAndSetsTTL() { @Test @EnabledOnCommand("HPEXPIRE") public void hpExpireReturnsMinusTwoWhenFieldDoesNotExist() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); actual.add(connection.hpExpire("hash-hexpire", 5L, "missing-field")); actual.add(connection.hpExpire("missing-key", 5L, "key-2")); @@ -3491,6 +3496,7 @@ public void hpExpireReturnsMinusTwoWhenFieldDoesNotExist() { @Test @EnabledOnCommand("HPEXPIRE") public void hpExpireReturnsTwoWhenZeroProvided() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); actual.add(connection.hpExpire("hash-hexpire", 0, "key-2")); @@ -3500,6 +3506,7 @@ public void hpExpireReturnsTwoWhenZeroProvided() { @Test @EnabledOnCommand("HEXPIREAT") public void hExpireAtReturnsSuccessAndSetsTTL() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); @@ -3515,6 +3522,7 @@ public void hExpireAtReturnsSuccessAndSetsTTL() { @Test @EnabledOnCommand("HEXPIREAT") public void hExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); @@ -3527,6 +3535,7 
@@ public void hExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { @Test @EnabledOnCommand("HEXPIREAT") public void hExpireAtReturnsTwoWhenZeroProvided() { + long fiveSecondsAgo = Instant.now().minusSeconds(5L).getEpochSecond(); actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); @@ -3538,6 +3547,7 @@ public void hExpireAtReturnsTwoWhenZeroProvided() { @Test @EnabledOnCommand("HEXPIREAT") public void hpExpireAtReturnsSuccessAndSetsTTL() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); @@ -3553,6 +3563,7 @@ public void hpExpireAtReturnsSuccessAndSetsTTL() { @Test @EnabledOnCommand("HEXPIREAT") public void hpExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); @@ -3565,6 +3576,7 @@ public void hpExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { @Test @EnabledOnCommand("HPEXPIREAT") public void hpExpireAdReturnsTwoWhenZeroProvided() { + long fiveSecondsAgo = Instant.now().minusSeconds(5L).getEpochSecond(); actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); @@ -3576,6 +3588,7 @@ public void hpExpireAdReturnsTwoWhenZeroProvided() { @Test @EnabledOnCommand("HPERSIST") public void hPersistReturnsSuccessAndPersistsField() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); actual.add(connection.hExpire("hash-hexpire", 5L, "key-2")); actual.add(connection.hPersist("hash-hexpire", "key-2")); @@ -3587,6 +3600,7 @@ public void hPersistReturnsSuccessAndPersistsField() { @Test @EnabledOnCommand("HPERSIST") public void hPersistReturnsMinusOneWhenFieldDoesNotHaveExpiration() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); actual.add(connection.hPersist("hash-hexpire", "key-2")); @@ -3596,6 +3610,7 @@ public void hPersistReturnsMinusOneWhenFieldDoesNotHaveExpiration() { @Test @EnabledOnCommand("HPERSIST") 
public void hPersistReturnsMinusTwoWhenFieldOrKeyMissing() {
+
 	actual.add(connection.hSet("hash-hexpire", "key-2", "value-2"));
 	actual.add(connection.hPersist("hash-hexpire", "missing-field"));
 	actual.add(connection.hPersist("missing-key", "key-2"));
@@ -3606,15 +3621,27 @@ public void hPersistReturnsMinusTwoWhenFieldOrKeyMissing() {
 	@Test
 	@EnabledOnCommand("HTTL")
 	public void hTtlReturnsMinusOneWhenFieldHasNoExpiration() {
+
 		actual.add(connection.hSet("hash-hexpire", "key-2", "value-2"));
 		actual.add(connection.hTtl("hash-hexpire", "key-2"));
 		verifyResults(Arrays.asList(Boolean.TRUE, List.of(-1L)));
 	}
+	@Test
+	@EnabledOnCommand("HTTL")
+	public void hTtlReturnsMinusOneIndependentOfTimeUnitWhenFieldHasNoExpiration() {
+
+		actual.add(connection.hSet("hash-hexpire", "key-2", "value-2"));
+		actual.add(connection.hTtl("hash-hexpire", TimeUnit.HOURS, "key-2"));
+
+		verifyResults(Arrays.asList(Boolean.TRUE, List.of(-1L)));
+	}
+
 	@Test
 	@EnabledOnCommand("HTTL")
 	public void hTtlReturnsMinusTwoWhenFieldOrKeyMissing() {
+
 		actual.add(connection.hTtl("hash-hexpire", "missing-field"));
 		actual.add(connection.hTtl("missing-key", "key-2"));
diff --git a/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java b/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java
index 171cdda3b8..5611fb351f 100644
--- a/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java
+++ b/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java
@@ -1100,6 +1100,7 @@ public void hStrLenReturnsZeroWhenKeyDoesNotExist() {
 	@Test
 	@EnabledOnCommand("HEXPIRE")
 	public void hExpireReturnsSuccessAndSetsTTL() {
+
 		nativeConnection.hset(KEY_1, KEY_2, VALUE_3);
 		assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_2_BYTES)).contains(1L);
@@ -1109,6 +1110,7 @@ public void hExpireReturnsSuccessAndSetsTTL() {
 	@Test
@EnabledOnCommand("HEXPIRE") public void hExpireReturnsMinusTwoWhenFieldDoesNotExist() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); // missing field assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_1_BYTES)).contains(-2L); @@ -1130,13 +1132,14 @@ public void hpExpireReturnsSuccessAndSetsTTL() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 5000L, KEY_2_BYTES)).contains(1L); - assertThat(clusterConnection.hTtl(KEY_1_BYTES, TimeUnit.MILLISECONDS,KEY_2_BYTES)) + assertThat(clusterConnection.hTtl(KEY_1_BYTES, TimeUnit.MILLISECONDS, KEY_2_BYTES)) .allSatisfy(val -> assertThat(val).isBetween(0L, 5000L)); } @Test @EnabledOnCommand("HEXPIRE") public void hpExpireReturnsMinusTwoWhenFieldDoesNotExist() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); // missing field assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 5L, KEY_1_BYTES)).contains(-2L); @@ -1147,6 +1150,7 @@ public void hpExpireReturnsMinusTwoWhenFieldDoesNotExist() { @Test @EnabledOnCommand("HEXPIRE") public void hpExpireReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); @@ -1155,6 +1159,7 @@ public void hpExpireReturnsTwoWhenZeroProvided() { @Test @EnabledOnCommand("HEXPIRE") public void hExpireAtReturnsSuccessAndSetsTTL() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(1L); @@ -1164,11 +1169,13 @@ public void hExpireAtReturnsSuccessAndSetsTTL() { @Test @EnabledOnCommand("HEXPIRE") public void hExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); // missing field 
assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_1_BYTES)).contains(-2L); + // missing key assertThat(clusterConnection.hashCommands().hExpireAt(KEY_2_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(-2L); } @@ -1184,16 +1191,18 @@ public void hExpireAdReturnsTwoWhenZeroProvided() { @Test @EnabledOnCommand("HEXPIRE") public void hpExpireAtReturnsSuccessAndSetsTTL() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(1L); - assertThat(clusterConnection.hTtl(KEY_1_BYTES, TimeUnit.MILLISECONDS, KEY_2_BYTES)) - .allSatisfy(val -> assertThat(val).isBetween(0L, 5000L)); + assertThat(clusterConnection.hpTtl(KEY_1_BYTES, KEY_2_BYTES)) + .allSatisfy(val -> assertThat(val).isGreaterThan(1000L).isLessThanOrEqualTo(5000L)); } @Test @EnabledOnCommand("HEXPIRE") public void hpExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); @@ -1206,6 +1215,7 @@ public void hpExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { @Test @EnabledOnCommand("HEXPIRE") public void hpExpireAdReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); @@ -1214,6 +1224,7 @@ public void hpExpireAdReturnsTwoWhenZeroProvided() { @Test @EnabledOnCommand("HEXPIRE") public void hPersistReturnsSuccessAndPersistsField() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_2_BYTES)).contains(1L); assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_2_BYTES)).contains(1L); @@ -1223,6 +1234,7 @@ public void hPersistReturnsSuccessAndPersistsField() { @Test @EnabledOnCommand("HEXPIRE") public void 
hPersistReturnsMinusOneWhenFieldDoesNotHaveExpiration() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); } @@ -1240,10 +1252,11 @@ public void hPersistReturnsMinusTwoWhenFieldOrKeyMissing() { @Test @EnabledOnCommand("HEXPIRE") public void hTtlReturnsMinusOneWhenFieldHasNoExpiration() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); - + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, TimeUnit.HOURS, KEY_2_BYTES)).contains(-1L); } @Test diff --git a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java index b9643dd331..88764daa33 100644 --- a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java @@ -15,8 +15,9 @@ */ package org.springframework.data.redis.core; -import static org.assertj.core.api.Assertions.*; -import static org.assertj.core.api.Assumptions.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assumptions.assumeThat; import java.io.IOException; import java.time.Duration; @@ -27,13 +28,15 @@ import java.util.Map; import java.util.concurrent.TimeUnit; +import org.assertj.core.api.InstanceOfAssertFactories; import org.junit.jupiter.api.BeforeEach; - import org.springframework.data.redis.ObjectFactory; import org.springframework.data.redis.RawObjectFactory; import org.springframework.data.redis.StringObjectFactory; import org.springframework.data.redis.connection.jedis.JedisConnectionFactory; import 
org.springframework.data.redis.connection.jedis.extension.JedisConnectionFactoryExtension; +import org.springframework.data.redis.core.Expirations.Expiration; +import org.springframework.data.redis.core.ExpireChanges.ExpiryChangeState; import org.springframework.data.redis.test.condition.EnabledOnCommand; import org.springframework.data.redis.test.extension.RedisStanalone; import org.springframework.data.redis.test.extension.parametrized.MethodSource; @@ -137,7 +140,6 @@ void testHScanReadsValuesFully() throws IOException { hashOps.put(key, key1, val1); hashOps.put(key, key2, val2); - long count = 0; try (Cursor> it = hashOps.scan(key, ScanOptions.scanOptions().count(1).build())) { @@ -222,10 +224,16 @@ void testExpireAndGetExpireMillis() { hashOps.put(key, key2, val2); assertThat(redisTemplate.opsForHash().expire(key, Duration.ofMillis(500), List.of(key1))) - .containsExactly(1L); + .satisfies(ExpireChanges::allOk); + + assertThat(redisTemplate.opsForHash().getExpire(key, List.of(key1))).satisfies(expirations -> { - assertThat(redisTemplate.opsForHash().getExpire(key, List.of(key1))) - .allSatisfy(it -> assertThat(it).isBetween(0L, 500L)); + assertThat(expirations.missing()).isEmpty(); + assertThat(expirations.precision()).isEqualTo(TimeUnit.SECONDS); + assertThat(expirations.expirationOf(key1)).extracting(Expiration::raw, InstanceOfAssertFactories.LONG) + .isBetween(0L, 1L); + assertThat(expirations.ttlOf(key1)).isBetween(Duration.ZERO, Duration.ofSeconds(1)); + }); } @ParameterizedRedisTest @@ -241,10 +249,22 @@ void testExpireAndGetExpireSeconds() { hashOps.put(key, key2, val2); assertThat(redisTemplate.opsForHash().expire(key, Duration.ofSeconds(5), List.of(key1, key2))) - .containsExactly(1L, 1L); + .satisfies(changes -> { + assertThat(changes.allOk()).isTrue(); + assertThat(changes.stateOf(key1)).isEqualTo(ExpiryChangeState.OK); + assertThat(changes.ok()).containsExactlyInAnyOrder(key1, key2); + assertThat(changes.missed()).isEmpty(); + 
assertThat(changes.stateChanges()).map(ExpiryChangeState::value).containsExactly(1L, 1L); + }); assertThat(redisTemplate.opsForHash().getExpire(key, TimeUnit.SECONDS, List.of(key1, key2))) - .allSatisfy(it -> assertThat(it).isBetween(0L, 5L)); + .satisfies(expirations -> { + assertThat(expirations.missing()).isEmpty(); + assertThat(expirations.precision()).isEqualTo(TimeUnit.SECONDS); + assertThat(expirations.expirationOf(key1)).extracting(Expiration::raw, InstanceOfAssertFactories.LONG) + .isBetween(0L, 5L); + assertThat(expirations.ttlOf(key1)).isBetween(Duration.ofSeconds(1), Duration.ofSeconds(5)); + }); } @ParameterizedRedisTest @@ -260,10 +280,26 @@ void testExpireAtAndGetExpireMillis() { hashOps.put(key, key2, val2); assertThat(redisTemplate.opsForHash().expireAt(key, Instant.now().plusMillis(500), List.of(key1, key2))) - .containsExactly(1L, 1L); + .satisfies(ExpireChanges::allOk); + + assertThat(redisTemplate.opsForHash().getExpire(key, TimeUnit.MILLISECONDS, List.of(key1, key2))) + .satisfies(expirations -> { + assertThat(expirations.missing()).isEmpty(); + assertThat(expirations.precision()).isEqualTo(TimeUnit.MILLISECONDS); + assertThat(expirations.expirationOf(key1)).extracting(Expiration::raw, InstanceOfAssertFactories.LONG) + .isBetween(0L, 500L); + assertThat(expirations.ttlOf(key1)).isBetween(Duration.ZERO, Duration.ofMillis(500)); + }); + } + + @ParameterizedRedisTest + void expireThrowsErrorOfNanoPrecision() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); - assertThat(redisTemplate.opsForHash().getExpire(key, List.of(key1, key2))) - .allSatisfy(it -> assertThat(it).isBetween(0L, 500L)); + assertThatExceptionOfType(IllegalArgumentException.class) + .isThrownBy(() -> redisTemplate.opsForHash().getExpire(key, TimeUnit.NANOSECONDS, List.of(key1))); } @ParameterizedRedisTest @@ -278,13 +314,14 @@ void testPersistAndGetExpireMillis() { hashOps.put(key, key1, val1); hashOps.put(key, key2, val2); - 
assertThat(redisTemplate.opsForHash().expireAt(key, Instant.now().plusMillis(500), List.of(key1, key2))) - .containsExactly(1L, 1L); + assertThat(redisTemplate.opsForHash().expireAt(key, Instant.now().plusMillis(800), List.of(key1, key2))) + .satisfies(ExpireChanges::allOk); - assertThat(redisTemplate.opsForHash().persist(key, List.of(key1, key2))) - .allSatisfy(it -> assertThat(it).isEqualTo(1L)); + assertThat(redisTemplate.opsForHash().persist(key, List.of(key2))).satisfies(ExpireChanges::allOk); - assertThat(redisTemplate.opsForHash().getExpire(key, List.of(key1, key2))) - .allSatisfy(it -> assertThat(it).isEqualTo(-1L)); + assertThat(redisTemplate.opsForHash().getExpire(key, List.of(key1, key2))).satisfies(expirations -> { + assertThat(expirations.expirationOf(key1).isPersistent()).isFalse(); + assertThat(expirations.expirationOf(key2).isPersistent()).isTrue(); + }); } } diff --git a/src/test/java/org/springframework/data/redis/core/DefaultReactiveHashOperationsIntegrationTests.java b/src/test/java/org/springframework/data/redis/core/DefaultReactiveHashOperationsIntegrationTests.java index dd1e1287ef..fc73f9865e 100644 --- a/src/test/java/org/springframework/data/redis/core/DefaultReactiveHashOperationsIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/core/DefaultReactiveHashOperationsIntegrationTests.java @@ -15,28 +15,31 @@ */ package org.springframework.data.redis.core; -import static org.assertj.core.api.Assertions.*; -import static org.assertj.core.api.Assumptions.*; -import static org.junit.jupiter.api.condition.OS.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assumptions.assumeThat; +import static org.junit.jupiter.api.condition.OS.MAC; -import org.junit.jupiter.api.condition.DisabledOnOs; -import org.springframework.data.redis.connection.convert.Converters; import reactor.test.StepVerifier; +import java.time.Duration; +import java.time.Instant; import java.util.Arrays; import 
java.util.Collection; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.concurrent.TimeUnit; import org.junit.jupiter.api.BeforeEach; - +import org.junit.jupiter.api.condition.DisabledOnOs; import org.springframework.data.redis.ObjectFactory; import org.springframework.data.redis.RawObjectFactory; import org.springframework.data.redis.SettingsUtils; import org.springframework.data.redis.StringObjectFactory; import org.springframework.data.redis.connection.RedisConnection; import org.springframework.data.redis.connection.RedisConnectionFactory; +import org.springframework.data.redis.connection.convert.Converters; import org.springframework.data.redis.connection.lettuce.LettuceConnectionFactory; import org.springframework.data.redis.serializer.RedisSerializationContext; import org.springframework.data.redis.serializer.StringRedisSerializer; @@ -501,6 +504,113 @@ void scan() { .verifyComplete(); } + @EnabledOnCommand("HEXPIRE") + @ParameterizedRedisTest + void testExpireAndGetExpireMillis() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + + putAll(key, key1, val1, key2, val2); + + hashOperations.expire(key, Duration.ofMillis(1500), List.of(key1)) // + .as(StepVerifier::create)// + .assertNext(changes -> { + assertThat(changes.allOk()).isTrue(); + }).verifyComplete(); + + hashOperations.getExpire(key, List.of(key1)) // + .as(StepVerifier::create) // + .assertNext(it -> { + assertThat(it.expirationOf(key1).raw()).isBetween(0L, 2L); + }).verifyComplete(); + } + + @ParameterizedRedisTest + @EnabledOnCommand("HEXPIRE") + void testExpireAndGetExpireSeconds() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + + 
putAll(key, key1, val1, key2, val2); + + hashOperations.expire(key, Duration.ofSeconds(5), List.of(key1, key2)) // + .as(StepVerifier::create)// + .assertNext(changes -> { + assertThat(changes.allOk()).isTrue(); + }).verifyComplete(); + + hashOperations.getExpire(key, TimeUnit.SECONDS, List.of(key1, key2)) // + .as(StepVerifier::create) // + .assertNext(it -> { + assertThat(it.expirationOf(key1).raw()).isBetween(0L, 5L); + assertThat(it.expirationOf(key2).raw()).isBetween(0L, 5L); + }).verifyComplete(); + + } + + @ParameterizedRedisTest + @EnabledOnCommand("HEXPIRE") + void testExpireAtAndGetExpireMillis() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + + putAll(key, key1, val1, key2, val2); + + redisTemplate.opsForHash().expireAt(key, Instant.now().plusMillis(1500), List.of(key1, key2)) + .as(StepVerifier::create)// + .assertNext(changes -> { + assertThat(changes.allOk()).isTrue(); + }).verifyComplete(); + + redisTemplate.opsForHash().getExpire(key, List.of(key1, key2)).as(StepVerifier::create)// + .assertNext(it -> { + assertThat(it.expirationOf(key1).raw()).isBetween(0L, 2L); + assertThat(it.expirationOf(key2).raw()).isBetween(0L, 2L); + }).verifyComplete(); + } + + @ParameterizedRedisTest + @EnabledOnCommand("HEXPIRE") + void testPersistAndGetExpireMillis() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + + putAll(key, key1, val1, key2, val2); + + redisTemplate.opsForHash().expireAt(key, Instant.now().plusMillis(1500), List.of(key1, key2)) + .as(StepVerifier::create)// + .assertNext(changes -> { + assertThat(changes.allOk()).isTrue(); + }).verifyComplete(); + + redisTemplate.opsForHash().persist(key, List.of(key1, key2)).as(StepVerifier::create)// + 
.assertNext(changes -> { + assertThat(changes.allOk()).isTrue(); + }).verifyComplete(); + + redisTemplate.opsForHash().getExpire(key, List.of(key1, key2)).as(StepVerifier::create)// + .assertNext(expirations -> { + assertThat(expirations.persistent()).contains(key1, key2); + }).verifyComplete(); + + } + @ParameterizedRedisTest // DATAREDIS-602 void delete() { diff --git a/src/test/java/org/springframework/data/redis/core/ExpirationsUnitTest.java b/src/test/java/org/springframework/data/redis/core/ExpirationsUnitTest.java new file mode 100644 index 0000000000..5fc1953d3d --- /dev/null +++ b/src/test/java/org/springframework/data/redis/core/ExpirationsUnitTest.java @@ -0,0 +1,94 @@ +/* + * Copyright 2025 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.redis.core; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.time.Duration; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.stream.IntStream; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; +import org.springframework.data.redis.core.Expirations.Timeouts; + +/** + * @author Christoph Strobl + * @since 3.5 + */ +class ExpirationsUnitTest { + + static final String KEY_1 = "key-1"; + static final String KEY_2 = "key-2"; + static final String KEY_3 = "key-3"; + + @ParameterizedTest + @EnumSource(TimeUnit.class) + void expirationMemorizesSourceUnit(TimeUnit targetUnit) { + + Expirations exp = Expirations.of(targetUnit, List.of(KEY_1), new Timeouts(TimeUnit.SECONDS, List.of(120L))); + + assertThat(exp.expirations().get(0)).satisfies(expiration -> { + assertThat(expiration.raw()).isEqualTo(120L); + assertThat(expiration.value()).isEqualTo(targetUnit.convert(120, TimeUnit.SECONDS)); + }); + } + + @Test + void expirationsCategorizesElements() { + + Expirations exp = createExpirations(new Timeouts(TimeUnit.SECONDS, List.of(-2L, -1L, 120L))); + + assertThat(exp.persistent()).containsExactly(KEY_2); + assertThat(exp.missing()).containsExactly(KEY_1); + assertThat(exp.expiring()).containsExactly(Map.entry(KEY_3, Duration.ofMinutes(2))); + } + + @Test + void returnsNullForMissingElements() { + + Expirations exp = createExpirations(new Timeouts(TimeUnit.SECONDS, List.of(-2L, -1L, 120L))); + + assertThat(exp.expirationOf("missing")).isNull(); + assertThat(exp.ttlOf("missing")).isNull(); + } + + @Test + void ttlReturnsDurationForEntriesWithTimeout() { + + Expirations exp = createExpirations(new Timeouts(TimeUnit.SECONDS, List.of(-2L, -1L, 120L))); + + assertThat(exp.ttlOf(KEY_3)).isEqualTo(Duration.ofMinutes(2)); + } + + @Test + void
ttlReturnsNullForPersistentAndMissingEntries() { + + Expirations exp = createExpirations(new Timeouts(TimeUnit.SECONDS, List.of(-2L, -1L, 120L))); + + assertThat(exp.ttlOf(KEY_1)).isNull(); + assertThat(exp.ttlOf(KEY_2)).isNull(); + } + + static Expirations createExpirations(Timeouts timeouts) { + + List keys = IntStream.range(1, timeouts.raw().size() + 1).mapToObj("key-%s"::formatted).toList(); + return Expirations.of(timeouts.timeUnit(), keys, timeouts); + } +} diff --git a/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java b/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java index b131a782d5..5ce8e54418 100644 --- a/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java @@ -15,8 +15,9 @@ */ package org.springframework.data.redis.support.collections; -import static org.assertj.core.api.Assertions.*; -import static org.assertj.core.api.Assumptions.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assumptions.assumeThat; import java.io.IOException; import java.text.DecimalFormat; @@ -41,6 +42,7 @@ import org.springframework.data.redis.ObjectFactory; import org.springframework.data.redis.RawObjectFactory; import org.springframework.data.redis.RedisSystemException; +import org.springframework.data.redis.core.ExpireChanges; import org.springframework.data.redis.core.Cursor; import org.springframework.data.redis.core.RedisCallback; import org.springframework.data.redis.core.RedisOperations; @@ -197,31 +199,39 @@ void testIncrement() { @ParameterizedRedisTest @EnabledOnCommand("HEXPIRE") void testExpire() { + K k1 = getKey(); V v1 = getValue(); assertThat(map.put(k1, v1)).isEqualTo(null); 
Collection keys = Collections.singletonList(k1); - assertThat(map.expire(Duration.ofSeconds(5), keys)).contains(1L); - assertThat(map.getExpire(keys)).allSatisfy(expiration -> assertThat(expiration).isBetween(1L, 5L)); - assertThat(map.getExpire(TimeUnit.MILLISECONDS, keys)) - .allSatisfy(expiration -> assertThat(expiration).isBetween(1000L, 5000L)); - assertThat(map.persist(keys)).contains(1L); + assertThat(map.expire(Duration.ofSeconds(5), keys)).satisfies(ExpireChanges::allOk); + assertThat(map.getExpire(keys)).satisfies(expiration -> { + assertThat(expiration.expirationOf(k1).raw()).isBetween(1L, 5L); + }); + assertThat(map.getExpire(TimeUnit.MILLISECONDS, keys)).satisfies(expiration -> { + assertThat(expiration.expirationOf(k1).raw()).isBetween(1000L, 5000L); + }); + assertThat(map.persist(keys)).satisfies(ExpireChanges::allOk); } @ParameterizedRedisTest @EnabledOnCommand("HEXPIRE") void testExpireAt() { + K k1 = getKey(); V v1 = getValue(); assertThat(map.put(k1, v1)).isEqualTo(null); Collection keys = Collections.singletonList(k1); - assertThat(map.expireAt(Instant.now().plusSeconds(5), keys)).contains(1L); - assertThat(map.getExpire(keys)).allSatisfy(expiration -> assertThat(expiration).isBetween(1L, 5L)); - assertThat(map.getExpire(TimeUnit.MILLISECONDS, keys)) - .allSatisfy(expiration -> assertThat(expiration).isBetween(1000L, 5000L)); - assertThat(map.persist(keys)).contains(1L); + assertThat(map.expireAt(Instant.now().plusSeconds(5), keys)).satisfies(ExpireChanges::allOk); + assertThat(map.getExpire(keys)).satisfies(expiration -> { + assertThat(expiration.expirationOf(k1).raw()).isBetween(1L, 5L); + }); + assertThat(map.getExpire(TimeUnit.MILLISECONDS, keys)).satisfies(expiration -> { + assertThat(expiration.expirationOf(k1).raw()).isBetween(1000L, 5000L); + }); + assertThat(map.persist(keys)).satisfies(ExpireChanges::allOk); } @ParameterizedRedisTest From f5239b891668950712c74ce1b099ef4dc7f40bfd Mon Sep 17 00:00:00 2001 From: Christoph Strobl Date: 
Mon, 10 Feb 2025 14:59:00 +0100 Subject: [PATCH 05/13] tmp save - introduce options for hash field expiration --- .../DefaultStringRedisConnection.java | 11 ++ .../connection/DefaultedRedisConnection.java | 9 ++ .../data/redis/connection/Hash.java | 115 ++++++++++++++++++ .../redis/connection/RedisHashCommands.java | 36 ++++-- .../jedis/JedisClusterHashCommands.java | 49 +++++++- .../connection/jedis/JedisHashCommands.java | 53 +++++++- .../lettuce/LettuceHashCommands.java | 55 ++++++++- .../data/redis/core/BoundHashOperations.java | 8 ++ .../redis/core/DefaultHashOperations.java | 14 +++ .../data/redis/core/Expirations.java | 4 +- .../data/redis/core/HashOperations.java | 4 + ...DefaultHashOperationsIntegrationTests.java | 40 ++++++ 12 files changed, 373 insertions(+), 25 deletions(-) create mode 100644 src/main/java/org/springframework/data/redis/connection/Hash.java diff --git a/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java b/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java index 77df3bc886..00a504a3ac 100644 --- a/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java +++ b/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java @@ -30,6 +30,7 @@ import org.springframework.data.geo.Metric; import org.springframework.data.geo.Point; import org.springframework.data.redis.RedisSystemException; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.convert.Converters; import org.springframework.data.redis.connection.convert.ListConverter; import org.springframework.data.redis.connection.convert.MapConverter; @@ -2566,6 +2567,11 @@ public Long hStrLen(byte[] key, byte[] field) { return convertAndReturn(delegate.hStrLen(key, field), Converters.identityConverter()); } + public @Nullable List expireHashField(byte[] key, 
org.springframework.data.redis.core.types.Expiration expiration, + FieldExpirationOptions options, byte[]... fields) { + return this.delegate.expireHashField(key, expiration, options, fields); + } + @Override public List hExpire(byte[] key, long seconds, byte[]... fields) { return this.delegate.hExpire(key, seconds, fields); @@ -2606,6 +2612,11 @@ public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) { return this.delegate.hTtl(key, timeUnit, fields); } + public @Nullable List expireHashField(String key, org.springframework.data.redis.core.types.Expiration expiration, + FieldExpirationOptions options, String... fields) { + return expireHashField(serialize(key), expiration, options, serializeMulti(fields)); + } + @Override public List hExpire(String key, long seconds, String... fields) { return hExpire(serialize(key), seconds, serializeMulti(fields)); diff --git a/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java b/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java index 4ba3d3ec0f..5c1befad36 100644 --- a/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java +++ b/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java @@ -28,6 +28,7 @@ import org.springframework.data.geo.GeoResults; import org.springframework.data.geo.Metric; import org.springframework.data.geo.Point; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.stream.ByteRecord; import org.springframework.data.redis.connection.stream.Consumer; import org.springframework.data.redis.connection.stream.MapRecord; @@ -1527,6 +1528,14 @@ default List hpTtl(byte[] key, byte[]... fields) { return hashCommands().hpTtl(key, fields); } + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. 
*/ + @Override + @Deprecated + default @Nullable List expireHashField(byte[] key, org.springframework.data.redis.core.types.Expiration expiration, + FieldExpirationOptions options, byte[]... fields) { + return hashCommands().expireHashField(key, expiration, options, fields); + } + // GEO COMMANDS /** @deprecated in favor of {@link RedisConnection#geoCommands()}}. */ diff --git a/src/main/java/org/springframework/data/redis/connection/Hash.java b/src/main/java/org/springframework/data/redis/connection/Hash.java new file mode 100644 index 0000000000..c8d0e8a248 --- /dev/null +++ b/src/main/java/org/springframework/data/redis/connection/Hash.java @@ -0,0 +1,115 @@ +/* + * Copyright 2025 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.redis.connection; + +import java.util.Objects; + +import org.springframework.lang.Contract; +import org.springframework.lang.Nullable; +import org.springframework.util.ObjectUtils; + +/** + * @author Christoph Strobl + * @since 3.5 + */ +public interface Hash { + + class FieldExpirationOptions { + + private static final FieldExpirationOptions NONE = new FieldExpirationOptions(null); + private @Nullable Condition condition; + + FieldExpirationOptions(@Nullable Condition condition) { + this.condition = condition; + } + + public static FieldExpirationOptions none() { + return NONE; + } + + @Contract("-> new") + public static FieldExpireOptionsBuilder builder() { + return new FieldExpireOptionsBuilder(); + } + + public @Nullable Condition getCondition() { + return condition; + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + FieldExpirationOptions that = (FieldExpirationOptions) o; + return ObjectUtils.nullSafeEquals(this.condition, that.condition); + } + + @Override + public int hashCode() { + return Objects.hash(condition); + } + + public static class FieldExpireOptionsBuilder { + + @Nullable Condition condition; + + @Contract("-> this") + public FieldExpireOptionsBuilder nx() { + this.condition = Condition.NX; + return this; + } + + @Contract("-> this") + public FieldExpireOptionsBuilder xx() { + this.condition = Condition.XX; + return this; + } + + @Contract("-> this") + public FieldExpireOptionsBuilder gt() { + this.condition = Condition.GT; + return this; + } + + @Contract("-> this") + public FieldExpireOptionsBuilder lt() { + this.condition = Condition.LT; + return this; + } + + @Contract("-> !null") + public FieldExpirationOptions build() { + return condition == null ?
NONE : new FieldExpirationOptions(condition); + } + } + + public enum Condition { + + /** Set expiration only when the field has no expiration. */ + NX, + /** Set expiration only when the field has an existing expiration. */ + XX, + /** Set expiration only when the new expiration is greater than current one. */ + GT, + /** Set expiration only when the new expiration is less than current one. */ + LT + } + } +} diff --git a/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java b/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java index 7af2be01ef..5fde9d5db3 100644 --- a/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java @@ -21,6 +21,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.core.Cursor; import org.springframework.data.redis.core.ScanOptions; import org.springframework.lang.Nullable; @@ -253,19 +254,28 @@ public interface RedisHashCommands { @Nullable Long hStrLen(byte[] key, byte[] field); - /** - * Set time to live for given {@code fields} in seconds. - * - * @param key must not be {@literal null}. - * @param seconds the amount of time after which the fields will be expired in seconds, must not be {@literal null}. - * @param fields must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; - * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); - * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction.
- * @see Redis Documentation: HEXPIRE - * @since 3.5 - */ + default @Nullable List expireHashField(byte[] key, org.springframework.data.redis.core.types.Expiration expiration, + byte[]... fields) { + return expireHashField(key, expiration, FieldExpirationOptions.none(), fields); + } + + + @Nullable List expireHashField(byte[] key, org.springframework.data.redis.core.types.Expiration expiration, + FieldExpirationOptions options, byte[]... fields); + + /** + * Set time to live for given {@code fields} in seconds. + * + * @param key must not be {@literal null}. + * @param seconds the amount of time after which the fields will be expired in seconds, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ @Nullable List hExpire(byte[] key, long seconds, byte[]... 
fields); diff --git a/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java b/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java index 9a6815460b..3326a00d62 100644 --- a/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java @@ -15,6 +15,7 @@ */ package org.springframework.data.redis.connection.jedis; +import redis.clients.jedis.args.ExpiryOption; import redis.clients.jedis.params.ScanParams; import redis.clients.jedis.resps.ScanResult; @@ -26,13 +27,16 @@ import java.util.concurrent.TimeUnit; import org.springframework.dao.DataAccessException; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.RedisHashCommands; import org.springframework.data.redis.core.Cursor; import org.springframework.data.redis.core.ScanCursor; import org.springframework.data.redis.core.ScanIteration; import org.springframework.data.redis.core.ScanOptions; +import org.springframework.data.redis.core.types.Expiration; import org.springframework.lang.Nullable; import org.springframework.util.Assert; +import org.springframework.util.ObjectUtils; /** * Cluster {@link RedisHashCommands} implementation for Jedis. @@ -281,16 +285,54 @@ protected ScanIteration> doScan(CursorId cursorId, ScanOpt ScanParams params = JedisConverters.toScanParams(options); - ScanResult> result = connection.getCluster().hscan(key, - JedisConverters.toBytes(cursorId), + ScanResult> result = connection.getCluster().hscan(key, JedisConverters.toBytes(cursorId), params); return new ScanIteration<>(CursorId.of(result.getCursor()), result.getResult()); } }.open(); } + @Nullable + @Override + public List expireHashField(byte[] key, Expiration expiration, FieldExpirationOptions options, + byte[]... 
fields) { + + if (expiration.isPersistent()) { + return hPersist(key, fields); + } + + if (ObjectUtils.nullSafeEquals(FieldExpirationOptions.none(), options)) { + if (ObjectUtils.nullSafeEquals(TimeUnit.MILLISECONDS, expiration.getTimeUnit())) { + if (expiration.isUnixTimestamp()) { + return hpExpireAt(key, expiration.getExpirationTimeInMilliseconds(), fields); + } + return hpExpire(key, expiration.getExpirationTimeInMilliseconds(), fields); + } + if (expiration.isUnixTimestamp()) { + return hExpireAt(key, expiration.getExpirationTimeInSeconds(), fields); + } + return hExpire(key, expiration.getExpirationTimeInSeconds(), fields); + } + + ExpiryOption option = ExpiryOption.valueOf(options.getCondition().name()); + + if (ObjectUtils.nullSafeEquals(TimeUnit.MILLISECONDS, expiration.getTimeUnit())) { + if (expiration.isUnixTimestamp()) { + return connection.getCluster().hpexpireAt(key, expiration.getExpirationTimeInMilliseconds(), option, fields); + } + return connection.getCluster().hpexpire(key, expiration.getExpirationTimeInMilliseconds(), option, fields); + } + + if (expiration.isUnixTimestamp()) { + return connection.getCluster().hexpireAt(key, expiration.getExpirationTimeInSeconds(), option, fields); + } + return connection.getCluster().hexpire(key, expiration.getExpirationTimeInSeconds(), option, fields); + + } + @Override public List hExpire(byte[] key, long seconds, byte[]... fields) { + Assert.notNull(key, "Key must not be null"); Assert.notNull(fields, "Fields must not be null"); @@ -368,8 +410,7 @@ public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) { try { return connection.getCluster().httl(key, fields).stream() - .map(it -> it != null ? timeUnit.convert(it, TimeUnit.SECONDS) : null) - .toList(); + .map(it -> it != null ? 
timeUnit.convert(it, TimeUnit.SECONDS) : null).toList(); } catch (Exception ex) { throw convertJedisAccessException(ex); } diff --git a/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java b/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java index 069324c9db..e8751e85cb 100644 --- a/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java @@ -16,6 +16,7 @@ package org.springframework.data.redis.connection.jedis; import redis.clients.jedis.Jedis; +import redis.clients.jedis.args.ExpiryOption; import redis.clients.jedis.commands.PipelineBinaryCommands; import redis.clients.jedis.params.ScanParams; import redis.clients.jedis.resps.ScanResult; @@ -28,6 +29,7 @@ import java.util.concurrent.TimeUnit; import org.springframework.dao.InvalidDataAccessApiUsageException; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.RedisHashCommands; import org.springframework.data.redis.connection.convert.Converters; import org.springframework.data.redis.core.Cursor; @@ -37,6 +39,7 @@ import org.springframework.data.redis.core.ScanOptions; import org.springframework.lang.Nullable; import org.springframework.util.Assert; +import org.springframework.util.ObjectUtils; /** * {@link RedisHashCommands} implementation for Jedis. 
@@ -152,7 +155,8 @@ public List> hRandFieldWithValues(byte[] key, long count) List> convertedMapEntryList = new ArrayList<>(mapEntryList.size()); - mapEntryList.forEach(entry -> convertedMapEntryList.add(Converters.entryOf(entry.getKey(), entry.getValue()))); + mapEntryList + .forEach(entry -> convertedMapEntryList.add(Converters.entryOf(entry.getKey(), entry.getValue()))); return convertedMapEntryList; @@ -239,8 +243,8 @@ protected ScanIteration> doScan(byte[] key, CursorId curso ScanParams params = JedisConverters.toScanParams(options); - ScanResult> result = connection.getJedis().hscan(key, - JedisConverters.toBytes(cursorId), params); + ScanResult> result = connection.getJedis().hscan(key, JedisConverters.toBytes(cursorId), + params); return new ScanIteration<>(CursorId.of(result.getCursor()), result.getResult()); } @@ -262,6 +266,46 @@ public List hpExpire(byte[] key, long millis, byte[]... fields) { return connection.invoke().just(Jedis::hpexpire, PipelineBinaryCommands::hpexpire, key, millis, fields); } + @Override + public @Nullable List expireHashField(byte[] key, org.springframework.data.redis.core.types.Expiration expiration, + FieldExpirationOptions options, byte[]... 
fields) { + + if (expiration.isPersistent()) { + return hPersist(key, fields); + } + + if (ObjectUtils.nullSafeEquals(FieldExpirationOptions.none(), options)) { + if (ObjectUtils.nullSafeEquals(TimeUnit.MILLISECONDS, expiration.getTimeUnit())) { + if (expiration.isUnixTimestamp()) { + return hpExpireAt(key, expiration.getExpirationTimeInMilliseconds(), fields); + } + return hpExpire(key, expiration.getExpirationTimeInMilliseconds(), fields); + } + if (expiration.isUnixTimestamp()) { + return hExpireAt(key, expiration.getExpirationTimeInSeconds(), fields); + } + return hExpire(key, expiration.getExpirationTimeInSeconds(), fields); + } + + ExpiryOption option = ExpiryOption.valueOf(options.getCondition().name()); + + if (ObjectUtils.nullSafeEquals(TimeUnit.MILLISECONDS, expiration.getTimeUnit())) { + if (expiration.isUnixTimestamp()) { + return connection.invoke().just(Jedis::hpexpireAt, PipelineBinaryCommands::hpexpireAt, key, + expiration.getExpirationTimeInMilliseconds(), option, fields); + } + return connection.invoke().just(Jedis::hpexpire, PipelineBinaryCommands::hpexpire, key, + expiration.getExpirationTimeInMilliseconds(), option, fields); + } + + if (expiration.isUnixTimestamp()) { + return connection.invoke().just(Jedis::hexpireAt, PipelineBinaryCommands::hexpireAt, key, + expiration.getExpirationTimeInSeconds(), option, fields); + } + return connection.invoke().just(Jedis::hexpire, PipelineBinaryCommands::hexpire, key, + expiration.getExpirationTimeInSeconds(), option, fields); + } + @Override public List hExpireAt(byte[] key, long unixTime, byte[]... fields) { return connection.invoke().just(Jedis::hexpireAt, PipelineBinaryCommands::hexpireAt, key, unixTime, fields); @@ -269,7 +313,8 @@ public List hExpireAt(byte[] key, long unixTime, byte[]... fields) { @Override public List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... 
fields) { - return connection.invoke().just(Jedis::hpexpireAt, PipelineBinaryCommands::hpexpireAt, key, unixTimeInMillis, fields); + return connection.invoke().just(Jedis::hpexpireAt, PipelineBinaryCommands::hpexpireAt, key, unixTimeInMillis, + fields); } @Override diff --git a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java index 61c6b8501a..bebb1f4fd0 100644 --- a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java @@ -15,10 +15,12 @@ */ package org.springframework.data.redis.connection.lettuce; +import io.lettuce.core.ExpireArgs; import io.lettuce.core.KeyValue; import io.lettuce.core.MapScanCursor; import io.lettuce.core.ScanArgs; import io.lettuce.core.api.async.RedisHashAsyncCommands; +import io.lettuce.core.protocol.CommandArgs; import java.util.List; import java.util.Map; @@ -27,6 +29,7 @@ import java.util.concurrent.TimeUnit; import org.springframework.dao.InvalidDataAccessApiUsageException; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.RedisHashCommands; import org.springframework.data.redis.connection.convert.Converters; import org.springframework.data.redis.core.Cursor; @@ -36,6 +39,7 @@ import org.springframework.data.redis.core.ScanOptions; import org.springframework.lang.Nullable; import org.springframework.util.Assert; +import org.springframework.util.ObjectUtils; /** * @author Christoph Strobl @@ -210,6 +214,54 @@ public Cursor> hScan(byte[] key, ScanOptions options) { return hScan(key, CursorId.initial(), options); } + @Override + public @Nullable List expireHashField(byte[] key, org.springframework.data.redis.core.types.Expiration expiration, + FieldExpirationOptions options, byte[]... 
fields) { + + if (expiration.isPersistent()) { + return hPersist(key, fields); + } + + if (ObjectUtils.nullSafeEquals(FieldExpirationOptions.none(), options)) { + if (ObjectUtils.nullSafeEquals(TimeUnit.MILLISECONDS, expiration.getTimeUnit())) { + if (expiration.isUnixTimestamp()) { + return hpExpireAt(key, expiration.getExpirationTimeInMilliseconds(), fields); + } + return hpExpire(key, expiration.getExpirationTimeInMilliseconds(), fields); + } + if (expiration.isUnixTimestamp()) { + return hExpireAt(key, expiration.getExpirationTimeInSeconds(), fields); + } + return hExpire(key, expiration.getExpirationTimeInSeconds(), fields); + } + + ExpireArgs option = new ExpireArgs() { + @Override + public void build(CommandArgs args) { + args.add(options.getCondition().name()); + } + }; + + if (ObjectUtils.nullSafeEquals(TimeUnit.MILLISECONDS, expiration.getTimeUnit())) { + if (expiration.isUnixTimestamp()) { + return connection.invoke().fromMany(RedisHashAsyncCommands::hpexpireat, key, + expiration.getExpirationTimeInMilliseconds(), option, fields).toList(); + } + return connection.invoke() + .fromMany(RedisHashAsyncCommands::hpexpire, key, expiration.getExpirationTimeInMilliseconds(), option, fields) + .toList(); + } + + if (expiration.isUnixTimestamp()) { + return connection.invoke() + .fromMany(RedisHashAsyncCommands::hexpireat, key, expiration.getExpirationTimeInSeconds(), option, fields) + .toList(); + } + return connection.invoke() + .fromMany(RedisHashAsyncCommands::hexpire, key, expiration.getExpirationTimeInSeconds(), option, fields) + .toList(); + } + @Override public List hExpire(byte[] key, long seconds, byte[]... fields) { return connection.invoke().fromMany(RedisHashAsyncCommands::hexpire, key, seconds, fields).toList(); @@ -248,8 +300,7 @@ public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) { @Override public List hpTtl(byte[] key, byte[]... 
fields) { - return connection.invoke().fromMany(RedisHashAsyncCommands::hpttl, key, fields) - .toList(); + return connection.invoke().fromMany(RedisHashAsyncCommands::hpttl, key, fields).toList(); } /** diff --git a/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java b/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java index 48079707b7..0503c33094 100644 --- a/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java @@ -23,6 +23,8 @@ import java.util.Set; import java.util.concurrent.TimeUnit; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; +import org.springframework.data.redis.core.types.Expiration; import org.springframework.lang.Nullable; /** @@ -157,6 +159,12 @@ public interface BoundHashOperations extends BoundKeyOperations { @Nullable Long lengthOfValue(HK hashKey); + default ExpireChanges expire(Expiration expiration, Collection hashKeys) { + return expire(expiration, FieldExpirationOptions.none(), hashKeys); + } + + ExpireChanges expire(Expiration expiration, FieldExpirationOptions options, Collection hashKeys); + /** * Set time to live for given {@code hashKey} (aka field). 
* diff --git a/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java b/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java index 88c76c529d..2be7e0bd3f 100644 --- a/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java @@ -27,8 +27,10 @@ import java.util.concurrent.TimeUnit; import org.springframework.core.convert.converter.Converter; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.convert.Converters; import org.springframework.data.redis.core.Expirations.Timeouts; +import org.springframework.data.redis.core.types.Expiration; import org.springframework.lang.Nullable; import org.springframework.util.Assert; @@ -251,6 +253,18 @@ public ExpireChanges expireAt(K key, Instant instant, Collection hashKey return raw != null ? ExpireChanges.of(orderedKeys, raw) : null; } + @Override + public ExpireChanges expire(K key, Expiration expiration, FieldExpirationOptions options, Collection hashKeys) { + + List orderedKeys = List.copyOf(hashKeys); + + byte[] rawKey = rawKey(key); + byte[][] rawHashKeys = rawHashKeys(orderedKeys.toArray()); + List raw = execute(connection -> connection.hashCommands().expireHashField(rawKey, expiration, options, rawHashKeys)); + + return raw != null ? 
ExpireChanges.of(orderedKeys, raw) : null; + } + @Override public ExpireChanges persist(K key, Collection hashKeys) { diff --git a/src/main/java/org/springframework/data/redis/core/Expirations.java b/src/main/java/org/springframework/data/redis/core/Expirations.java index a2475e4fa0..958f90e3a8 100644 --- a/src/main/java/org/springframework/data/redis/core/Expirations.java +++ b/src/main/java/org/springframework/data/redis/core/Expirations.java @@ -41,7 +41,7 @@ * @author Christoph Strobl * @since 3.5 */ -public class Expirations { +public class Expirations { // TODO: should we move this to let's say Hash.class or another place private final TimeUnit unit; private final Map expirations; @@ -182,7 +182,7 @@ public int size() { * {@link #PERSISTENT} mark predefined states returned by Redis indicating a time to live value could not be retrieved * due to various reasons. */ - public static class Expiration { + public static class Expiration { // TODO: is Expiry a better name for this type? private final long raw; @Nullable TimeUnit sourceUnit; diff --git a/src/main/java/org/springframework/data/redis/core/HashOperations.java b/src/main/java/org/springframework/data/redis/core/HashOperations.java index 26767be47f..c32c33983c 100644 --- a/src/main/java/org/springframework/data/redis/core/HashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/HashOperations.java @@ -23,6 +23,8 @@ import java.util.Set; import java.util.concurrent.TimeUnit; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; +import org.springframework.data.redis.core.types.Expiration; import org.springframework.lang.Nullable; /** @@ -259,6 +261,8 @@ public interface HashOperations { @Nullable ExpireChanges expireAt(H key, Instant expireAt, Collection hashKeys); + ExpireChanges expire(H key, Expiration expiration, FieldExpirationOptions options, Collection hashKeys); + /** * Remove the expiration from given {@code hashKey} (aka field). 
* diff --git a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java index 88764daa33..db3d57e903 100644 --- a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java @@ -30,9 +30,11 @@ import org.assertj.core.api.InstanceOfAssertFactories; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.springframework.data.redis.ObjectFactory; import org.springframework.data.redis.RawObjectFactory; import org.springframework.data.redis.StringObjectFactory; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.jedis.JedisConnectionFactory; import org.springframework.data.redis.connection.jedis.extension.JedisConnectionFactoryExtension; import org.springframework.data.redis.core.Expirations.Expiration; @@ -302,6 +304,44 @@ void expireThrowsErrorOfNanoPrecision() { .isThrownBy(() -> redisTemplate.opsForHash().getExpire(key, TimeUnit.NANOSECONDS, List.of(key1))); } + @ParameterizedRedisTest + void testExpireWithOptionsNone() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + + hashOps.put(key, key1, val1); + hashOps.put(key, key2, val2); + + ExpireChanges expire = redisTemplate.opsForHash().expire(key, org.springframework.data.redis.core.types.Expiration.seconds(20), FieldExpirationOptions.none(), List.of(key1)); + + assertThat(expire.allOk()).isTrue(); + } + + @ParameterizedRedisTest + void testExpireWithOptions() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = 
hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + + hashOps.put(key, key1, val1); + hashOps.put(key, key2, val2); + + redisTemplate.opsForHash().expire(key, org.springframework.data.redis.core.types.Expiration.seconds(20), FieldExpirationOptions.none(), List.of(key1)); + redisTemplate.opsForHash().expire(key, org.springframework.data.redis.core.types.Expiration.seconds(60), FieldExpirationOptions.none(), List.of(key2)); + + ExpireChanges changes = redisTemplate.opsForHash().expire(key, org.springframework.data.redis.core.types.Expiration.seconds(30), FieldExpirationOptions.builder().gt().build(), List.of(key1, key2)); + + assertThat(changes.ok()).containsExactly(key1); + assertThat(changes.skipped()).containsExactly(key2); + } + @ParameterizedRedisTest @EnabledOnCommand("HEXPIRE") void testPersistAndGetExpireMillis() { From 8c5e3a246dbde5ff0c1b0a099ae5762fc7d18504 Mon Sep 17 00:00:00 2001 From: Christoph Strobl Date: Tue, 11 Feb 2025 10:13:14 +0100 Subject: [PATCH 06/13] Add field expiration options to reactive API. 
--- .../connection/ReactiveHashCommands.java | 321 +++++++----------- .../lettuce/LettuceHashCommands.java | 18 +- .../lettuce/LettuceReactiveHashCommands.java | 65 ++-- .../core/DefaultReactiveHashOperations.java | 18 +- .../redis/core/ReactiveHashOperations.java | 4 + .../data/redis/core/types/Expiration.java | 5 +- ...DefaultHashOperationsIntegrationTests.java | 1 + ...eactiveHashOperationsIntegrationTests.java | 29 ++ 8 files changed, 206 insertions(+), 255 deletions(-) diff --git a/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java b/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java index 35e3437141..f58f4e32a9 100644 --- a/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java @@ -26,10 +26,12 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; import java.util.function.Function; import org.reactivestreams.Publisher; import org.springframework.dao.InvalidDataAccessApiUsageException; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.ReactiveRedisConnection.BooleanResponse; import org.springframework.data.redis.connection.ReactiveRedisConnection.Command; import org.springframework.data.redis.connection.ReactiveRedisConnection.CommandResponse; @@ -38,6 +40,7 @@ import org.springframework.data.redis.connection.ReactiveRedisConnection.MultiValueResponse; import org.springframework.data.redis.connection.ReactiveRedisConnection.NumericResponse; import org.springframework.data.redis.core.ScanOptions; +import org.springframework.data.redis.core.types.Expiration; import org.springframework.lang.Nullable; import org.springframework.util.Assert; @@ -846,54 +849,59 @@ default Mono hStrLen(ByteBuffer key, ByteBuffer field) { Flux> hStrLen(Publisher 
commands); /** - * @author Tihomir Mateev - * @see Redis Documentation: HEXPIRE * @since 3.5 */ - class Expire extends HashFieldsCommand { + class ExpireCommand extends HashFieldsCommand { - private final Duration ttl; - - /** - * Creates a new {@link Expire} given a {@code key}, a {@link List} of {@code fields} and a time-to-live - * - * @param key can be {@literal null}. - * @param fields must not be {@literal null}. - * @param ttl the duration of the time to live. - */ - private Expire(@Nullable ByteBuffer key, List fields, Duration ttl) { + private final Expiration expiration; + private final FieldExpirationOptions options; + private ExpireCommand(@Nullable ByteBuffer key, List fields, Expiration expiration, + FieldExpirationOptions options) { super(key, fields); - this.ttl = ttl; + this.expiration = expiration; + this.options = options; } - /** - * Specify the {@code fields} within the hash to set an expiration for. - * - * @param fields must not be {@literal null}. - * @return new instance of {@link Expire}. - */ - public static Expire expire(List fields, Duration ttl) { + public static ExpireCommand expire(List fields, long timeout, TimeUnit unit) { Assert.notNull(fields, "Field must not be null"); - return new Expire(null, fields, ttl); + return expire(fields, Expiration.from(timeout, unit)); } - /** - * Define the {@code key} the hash is stored at. - * - * @param key must not be {@literal null}. - * @return new instance of {@link Expire}. - */ - public Expire from(ByteBuffer key) { - return new Expire(key, getFields(), ttl); + public static ExpireCommand expire(List fields, Duration ttl) { + + Assert.notNull(fields, "Field must not be null"); + return expire(fields, Expiration.from(ttl)); } - /** - * @return the ttl. 
- */ - public Duration getTtl() { - return ttl; + public static ExpireCommand expire(List fields, Expiration expiration) { + return new ExpireCommand(null, fields, expiration, FieldExpirationOptions.none()); + } + + public static ExpireCommand expireAt(List fields, Instant ttl, TimeUnit precision) { + + if (precision.compareTo(TimeUnit.MILLISECONDS) > 0) { + return expire(fields, Expiration.unixTimestamp(ttl.getEpochSecond(), TimeUnit.SECONDS)); + } + + return expire(fields, Expiration.unixTimestamp(ttl.toEpochMilli(), TimeUnit.MILLISECONDS)); + } + + public ExpireCommand from(ByteBuffer key) { + return new ExpireCommand(key, getFields(), expiration, options); + } + + public ExpireCommand withOptions(FieldExpirationOptions options) { + return new ExpireCommand(getKey(), getFields(), getExpiration(), options); + } + + public Expiration getExpiration() { + return expiration; + } + + public FieldExpirationOptions getOptions() { + return options; } } @@ -903,51 +911,53 @@ public Duration getTtl() { * @param key must not be {@literal null}. * @param field must not be {@literal null}. * @param duration must not be {@literal null}. 
- * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted already + * due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); - * {@code -2} indicating there is no such field; + * {@code -2} indicating there is no such field; * @see Redis Documentation: HEXPIRE * @since 3.5 */ default Mono hExpire(ByteBuffer key, Duration duration, ByteBuffer field) { - Assert.notNull(duration, "Duration must not be null"); + Assert.notNull(duration, "Duration must not be null"); return hExpire(key, duration, Collections.singletonList(field)).singleOrEmpty(); } /** - * Expire a {@link List} of {@literal field} after a given {@link Duration} of time, measured in milliseconds, has passed. + * Expire a {@link List} of {@literal field} after a given {@link Duration} of time, measured in milliseconds, has + * passed. * * @param key must not be {@literal null}. * @param fields must not be {@literal null}. * @param duration must not be {@literal null}. 
- * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; - * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); - * {@code -2} indicating there is no such field; + * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; * @see Redis Documentation: HEXPIRE * @since 3.5 */ default Flux hExpire(ByteBuffer key, Duration duration, List fields) { Assert.notNull(duration, "Duration must not be null"); - return hExpire(Flux.just(Expire.expire(fields, duration).from(key))) + return expireHashField(Flux.just(ExpireCommand.expire(fields, duration).from(key))) .mapNotNull(NumericResponse::getOutput); } /** - * Expire a {@link List} of {@literal field} after a given {@link Duration} of time, measured in milliseconds, has passed. + * Expire a {@link List} of {@literal field} after a given {@link Duration} of time, measured in milliseconds, has + * passed. * * @param commands must not be {@literal null}. 
- * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; - * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); - * {@code -2} indicating there is no such field; + * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; * @since 3.5 * @see Redis Documentation: HEXPIRE */ - Flux> hExpire(Publisher commands); + Flux> expireHashField(Publisher commands); /** * Expire a given {@literal field} after a given {@link Duration} of time, measured in milliseconds, has passed. @@ -955,104 +965,41 @@ default Flux hExpire(ByteBuffer key, Duration duration, List f * @param key must not be {@literal null}. * @param field must not be {@literal null}. * @param duration must not be {@literal null}. 
- * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted already + * due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); - * {@code -2} indicating there is no such field; + * {@code -2} indicating there is no such field; * @see Redis Documentation: HEXPIRE * @since 3.5 */ default Mono hpExpire(ByteBuffer key, Duration duration, ByteBuffer field) { - Assert.notNull(duration, "Duration must not be null"); + Assert.notNull(duration, "Duration must not be null"); return hpExpire(key, duration, Collections.singletonList(field)).singleOrEmpty(); } /** - * Expire a {@link List} of {@literal field} after a given {@link Duration} of time, measured in milliseconds, has passed. + * Expire a {@link List} of {@literal field} after a given {@link Duration} of time, measured in milliseconds, has + * passed. * * @param key must not be {@literal null}. * @param fields must not be {@literal null}. * @param duration must not be {@literal null}. 
- * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; - * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); - * {@code -2} indicating there is no such field; + * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; * @see Redis Documentation: HEXPIRE * @since 3.5 */ default Flux hpExpire(ByteBuffer key, Duration duration, List fields) { - Assert.notNull(duration, "Duration must not be null"); - return hpExpire(Flux.just(Expire.expire(fields, duration).from(key))) + Assert.notNull(duration, "Duration must not be null"); + return expireHashField(Flux.just(new ExpireCommand(key, fields, + Expiration.from(duration.toMillis(), TimeUnit.MILLISECONDS), FieldExpirationOptions.none()))) .mapNotNull(NumericResponse::getOutput); } - /** - * Expire a {@link List} of {@literal field} after a given {@link Duration} of time, measured in milliseconds, has passed. - * - * @param commands must not be {@literal null}. 
- * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; - * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); - * {@code -2} indicating there is no such field; - * @since 3.5 - * @see Redis Documentation: HEXPIRE - */ - Flux> hpExpire(Publisher commands); - - /** - * @author Tihomir Mateev - * @see Redis Documentation: HEXPIREAT - * @since 3.5 - */ - class ExpireAt extends HashFieldsCommand { - - private final Instant expireAt; - - /** - * Creates a new {@link ExpireAt} given a {@code key}, a {@link List} of {@literal fields} and a {@link Instant} - * - * @param key can be {@literal null}. - * @param fields must not be {@literal null}. - * @param expireAt the {@link Instant} to expire at. - */ - private ExpireAt(@Nullable ByteBuffer key, List fields, Instant expireAt) { - - super(key, fields); - this.expireAt = expireAt; - } - - /** - * Specify the {@code fields} within the hash to set an expiration for. - * - * @param fields must not be {@literal null}. - * @return new instance of {@link ExpireAt}. - */ - public static ExpireAt expireAt(List fields, Instant expireAt) { - - Assert.notNull(fields, "Fields must not be null"); - return new ExpireAt(null, fields, expireAt); - } - - /** - * Define the {@code key} the hash is stored at. - * - * @param key must not be {@literal null}. - * @return new instance of {@link ExpireAt}. - */ - public ExpireAt from(ByteBuffer key) { - return new ExpireAt(key, getFields(), expireAt); - } - - /** - * @return the ttl. 
- */ - public Instant getExpireAt() { - return expireAt; - } - } - /** * Expire a given {@literal field} in a given {@link Instant} of time, indicated as an absolute * Unix timestamp in seconds since Unix epoch @@ -1060,10 +1007,10 @@ public Instant getExpireAt() { * @param key must not be {@literal null}. * @param field must not be {@literal null}. * @param expireAt must not be {@literal null}. - * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is - * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not - * met); {@code -2} indicating there is no such field; + * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted already + * due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is + * not met); {@code -2} indicating there is no such field; * @see Redis Documentation: HEXPIREAT * @since 3.5 */ @@ -1080,33 +1027,20 @@ default Mono hExpireAt(ByteBuffer key, Instant expireAt, ByteBuffer field) * @param key must not be {@literal null}. * @param fields must not be {@literal null}. * @param expireAt must not be {@literal null}. 
- * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is - * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not - * met); {@code -2} indicating there is no such field; + * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | + * GT | LT condition is not met); {@code -2} indicating there is no such field; * @see Redis Documentation: HEXPIREAT * @since 3.5 */ default Flux hExpireAt(ByteBuffer key, Instant expireAt, List fields) { Assert.notNull(expireAt, "Duration must not be null"); - return hExpireAt(Flux.just(ExpireAt.expireAt(fields, expireAt).from(key))).mapNotNull(NumericResponse::getOutput); + return expireHashField(Flux.just(ExpireCommand.expireAt(fields, expireAt, TimeUnit.SECONDS).from(key))) + .mapNotNull(NumericResponse::getOutput); } - /** - * Expire a {@link List} of {@literal field} in a given {@link Instant} of time, indicated as an absolute - * Unix timestamp in seconds since Unix epoch - * - * @param commands must not be {@literal null}. 
- * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is - * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not - * met); {@code -2} indicating there is no such field; - * @since 3.5 - * @see Redis Documentation: HEXPIREAT - */ - Flux> hExpireAt(Publisher commands); - /** * Expire a given {@literal field} in a given {@link Instant} of time, indicated as an absolute * Unix timestamp in milliseconds since Unix epoch @@ -1114,10 +1048,10 @@ default Flux hExpireAt(ByteBuffer key, Instant expireAt, List * @param key must not be {@literal null}. * @param field must not be {@literal null}. * @param expireAt must not be {@literal null}. - * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is - * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not - * met); {@code -2} indicating there is no such field; + * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted already + * due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is + * not met); {@code -2} indicating there is no such field; * @see Redis Documentation: HPEXPIREAT * @since 3.5 */ @@ -1134,47 +1068,32 @@ default Mono hpExpireAt(ByteBuffer key, Instant expireAt, ByteBuffer field * @param key must not be {@literal null}. * @param fields must not be {@literal null}. * @param expireAt must not be {@literal null}. 
- * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is - * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not - * met); {@code -2} indicating there is no such field; + * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | + * GT | LT condition is not met); {@code -2} indicating there is no such field; * @see Redis Documentation: HPEXPIREAT * @since 3.5 */ default Flux hpExpireAt(ByteBuffer key, Instant expireAt, List fields) { Assert.notNull(expireAt, "Duration must not be null"); - return hpExpireAt(Flux.just(ExpireAt.expireAt(fields, expireAt).from(key))).mapNotNull(NumericResponse::getOutput); + return expireHashField(Flux.just(ExpireCommand.expireAt(fields, expireAt, TimeUnit.MILLISECONDS).from(key))) + .mapNotNull(NumericResponse::getOutput); } - /** - * Expire a {@link List} of {@literal field} in a given {@link Instant} of time, indicated as an absolute - * Unix timestamp in milliseconds since Unix epoch - * - * @param commands must not be {@literal null}. 
- * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is - * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not - * met); {@code -2} indicating there is no such field; - * @since 3.5 - * @see Redis Documentation: HPEXPIREAT - */ - Flux> hpExpireAt(Publisher commands); - /** * Persist a given {@literal field} removing any associated expiration, measured as absolute * Unix timestamp in seconds since Unix epoch * * @param key must not be {@literal null}. * @param field must not be {@literal null}. - * @return a {@link Mono} emitting the persist result - {@code 1} indicating expiration time is removed; - * {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field; - * + * @return a {@link Mono} emitting the persist result - {@code 1} indicating expiration time is removed; {@code -1} + * field has no expiration time to be removed; {@code -2} indicating there is no such field; * @see Redis Documentation: HPERSIST * @since 3.5 */ default Mono hPersist(ByteBuffer key, ByteBuffer field) { - return hPersist(key, Collections.singletonList(field)).singleOrEmpty(); } @@ -1183,14 +1102,13 @@ default Mono hPersist(ByteBuffer key, ByteBuffer field) { * * @param key must not be {@literal null}. * @param fields must not be {@literal null}. 
- * @return a {@link Flux} emitting the persisting results one by one - {@code 1} indicating expiration time is removed; - * {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field; - * + * @return a {@link Flux} emitting the persisting results one by one - {@code 1} indicating expiration time is + * removed; {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such + * field; * @see Redis Documentation: HPERSIST * @since 3.5 */ default Flux hPersist(ByteBuffer key, List fields) { - return hPersist(Flux.just(new HashFieldsCommand(key, fields))).mapNotNull(NumericResponse::getOutput); } @@ -1198,9 +1116,9 @@ default Flux hPersist(ByteBuffer key, List fields) { * Persist a given {@link List} of {@literal field} removing any associated expiration. * * @param commands must not be {@literal null}. - * @return a {@link Flux} emitting the persisting results one by one - {@code 1} indicating expiration time is removed; - * {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field; - * * @since 3.5 + * @return a {@link Flux} emitting the persisting results one by one - {@code 1} indicating expiration time is + * removed; {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such + * field; * @since 3.5 * @see Redis Documentation: HPERSIST */ Flux> hPersist(Publisher commands); @@ -1210,9 +1128,9 @@ default Flux hPersist(ByteBuffer key, List fields) { * * @param key must not be {@literal null}. * @param field must not be {@literal null}. - * @return a {@link Mono} emitting the TTL result - the time to live in seconds; or a negative value - * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. 
- * The command returns {@code -2} if the key does not exist; + * @return a {@link Mono} emitting the TTL result - the time to live in seconds; or a negative value to signal an + * error. The command returns {@code -1} if the key exists but has no associated expiration time. The command + * returns {@code -2} if the key does not exist; * @see Redis Documentation: HTTL * @since 3.5 */ @@ -1226,9 +1144,9 @@ default Mono hTtl(ByteBuffer key, ByteBuffer field) { * * @param key must not be {@literal null}. * @param fields must not be {@literal null}. - * @return a {@link Flux} emitting the TTL results one by one - the time to live in seconds; or a negative value - * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. - * The command returns {@code -2} if the key does not exist; + * @return a {@link Flux} emitting the TTL results one by one - the time to live in seconds; or a negative value to + * signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; * @see Redis Documentation: HTTL * @since 3.5 */ @@ -1241,23 +1159,22 @@ default Flux hTtl(ByteBuffer key, List fields) { * Returns the time-to-live of all the given {@literal field} in the {@link List} in seconds. * * @param commands must not be {@literal null}. - * @return a {@link Flux} emitting the persisting results one by one - the time to live in seconds; or a negative value - * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. - * The command returns {@code -2} if the key does not exist; + * @return a {@link Flux} emitting the persisting results one by one - the time to live in seconds; or a negative + * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration + * time. 
The command returns {@code -2} if the key does not exist; * @since 3.5 * @see Redis Documentation: HTTL */ Flux> hTtl(Publisher commands); - /** * Returns the time-to-live of a given {@literal field} in milliseconds. * * @param key must not be {@literal null}. * @param field must not be {@literal null}. - * @return a {@link Mono} emitting the TTL result - the time to live in milliseconds; or a negative value - * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. - * The command returns {@code -2} if the key does not exist; + * @return a {@link Mono} emitting the TTL result - the time to live in milliseconds; or a negative value to signal an + * error. The command returns {@code -1} if the key exists but has no associated expiration time. The command + * returns {@code -2} if the key does not exist; * @see Redis Documentation: HPTTL * @since 3.5 */ @@ -1272,8 +1189,8 @@ default Mono hpTtl(ByteBuffer key, ByteBuffer field) { * @param key must not be {@literal null}. * @param fields must not be {@literal null}. * @return a {@link Flux} emitting the TTL results one by one - the time to live in milliseconds; or a negative value - * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. - * The command returns {@code -2} if the key does not exist; + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; * @see Redis Documentation: HPTTL * @since 3.5 */ @@ -1286,9 +1203,9 @@ default Flux hpTtl(ByteBuffer key, List fields) { * Returns the time-to-live of all the given {@literal field} in the {@link List} in milliseconds. * * @param commands must not be {@literal null}. - * @return a {@link Flux} emitting the persisting results one by one - the time to live in milliseconds; or a negative value - * to signal an error. 
The command returns {@code -1} if the key exists but has no associated expiration time. - * The command returns {@code -2} if the key does not exist; + * @return a {@link Flux} emitting the persisting results one by one - the time to live in milliseconds; or a negative + * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration + * time. The command returns {@code -2} if the key does not exist; * @since 3.5 * @see Redis Documentation: HPTTL */ diff --git a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java index bebb1f4fd0..16564fd1eb 100644 --- a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java @@ -222,22 +222,14 @@ public Cursor> hScan(byte[] key, ScanOptions options) { return hPersist(key, fields); } - if (ObjectUtils.nullSafeEquals(FieldExpirationOptions.none(), options)) { - if (ObjectUtils.nullSafeEquals(TimeUnit.MILLISECONDS, expiration.getTimeUnit())) { - if (expiration.isUnixTimestamp()) { - return hpExpireAt(key, expiration.getExpirationTimeInMilliseconds(), fields); - } - return hpExpire(key, expiration.getExpirationTimeInMilliseconds(), fields); - } - if (expiration.isUnixTimestamp()) { - return hExpireAt(key, expiration.getExpirationTimeInSeconds(), fields); - } - return hExpire(key, expiration.getExpirationTimeInSeconds(), fields); - } - ExpireArgs option = new ExpireArgs() { @Override public void build(CommandArgs args) { + + if(ObjectUtils.nullSafeEquals(options, FieldExpirationOptions.none())) { + return; + } + args.add(options.getCondition().name()); } }; diff --git a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java 
b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java index 33e9c162e1..3cc7bfd9c3 100644 --- a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java @@ -15,8 +15,10 @@ */ package org.springframework.data.redis.connection.lettuce; +import io.lettuce.core.ExpireArgs; import io.lettuce.core.KeyValue; import io.lettuce.core.ScanStream; +import io.lettuce.core.protocol.CommandArgs; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; @@ -25,10 +27,11 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import org.reactivestreams.Publisher; - +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.ReactiveHashCommands; import org.springframework.data.redis.connection.ReactiveRedisConnection.BooleanResponse; import org.springframework.data.redis.connection.ReactiveRedisConnection.CommandResponse; @@ -38,6 +41,7 @@ import org.springframework.data.redis.connection.ReactiveRedisConnection.NumericResponse; import org.springframework.data.redis.connection.convert.Converters; import org.springframework.util.Assert; +import org.springframework.util.ObjectUtils; /** * @author Christoph Strobl @@ -265,49 +269,48 @@ public Flux> hStrLen(Publisher> hExpire(Publisher commands) { + public Flux> expireHashField(Publisher commands) { return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { Assert.notNull(command.getKey(), "Key must not be null"); Assert.notNull(command.getFields(), "Fields must not be null"); - return cmd.hexpire(command.getKey(), command.getTtl().toSeconds(), command.getFields().toArray(ByteBuffer[]::new)) - .map(value -> new NumericResponse<>(command, value)); - })); - } - - 
@Override - public Flux> hpExpire(Publisher commands) { - return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { + ByteBuffer[] fields = command.getFields().toArray(ByteBuffer[]::new); - Assert.notNull(command.getKey(), "Key must not be null"); - Assert.notNull(command.getFields(), "Fields must not be null"); + if (command.getExpiration().isPersistent()) { + return cmd.hpersist(command.getKey(), fields).map(value -> new NumericResponse<>(command, value)); + } - return cmd.hpexpire(command.getKey(), command.getTtl().toMillis(), command.getFields().toArray(ByteBuffer[]::new)) - .map(value -> new NumericResponse<>(command, value)); - })); - } + ExpireArgs args = new ExpireArgs() { - @Override - public Flux> hExpireAt(Publisher commands) { - return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { + @Override + public void build(CommandArgs args) { + super.build(args); + if (ObjectUtils.nullSafeEquals(command.getOptions(), FieldExpirationOptions.none())) { + return; + } - Assert.notNull(command.getKey(), "Key must not be null"); - Assert.notNull(command.getFields(), "Fields must not be null"); + args.add(command.getOptions().getCondition().name()); + } + }; - return cmd.hexpireat(command.getKey(), command.getExpireAt().getEpochSecond(), command.getFields().toArray(ByteBuffer[]::new)) - .map(value -> new NumericResponse<>(command, value)); - })); - } + if (command.getExpiration().isUnixTimestamp()) { - @Override - public Flux> hpExpireAt(Publisher commands) { - return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { + if (command.getExpiration().getTimeUnit().equals(TimeUnit.MILLISECONDS)) { + return cmd + .hpexpireat(command.getKey(), command.getExpiration().getExpirationTimeInMilliseconds(), args, fields) + .map(value -> new NumericResponse<>(command, value)); + } + return cmd.hexpireat(command.getKey(), command.getExpiration().getExpirationTimeInSeconds(), args, fields) + .map(value -> new 
NumericResponse<>(command, value)); + } - Assert.notNull(command.getKey(), "Key must not be null"); - Assert.notNull(command.getFields(), "Fields must not be null"); + if (command.getExpiration().getTimeUnit().equals(TimeUnit.MILLISECONDS)) { + return cmd.hpexpire(command.getKey(), command.getExpiration().getExpirationTimeInMilliseconds(), args, fields) + .map(value -> new NumericResponse<>(command, value)); + } - return cmd.hpexpireat(command.getKey(), command.getExpireAt().toEpochMilli(), command.getFields().toArray(ByteBuffer[]::new)) + return cmd.hexpire(command.getKey(), command.getExpiration().getExpirationTimeInSeconds(), args, fields) .map(value -> new NumericResponse<>(command, value)); })); } diff --git a/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java b/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java index 070127c235..d373a7f063 100644 --- a/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java @@ -15,6 +15,10 @@ */ package org.springframework.data.redis.core; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; +import org.springframework.data.redis.connection.ReactiveHashCommands.ExpireCommand; +import org.springframework.data.redis.connection.ReactiveRedisConnection.NumericResponse; +import org.springframework.data.redis.core.types.Expiration; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; @@ -239,18 +243,18 @@ public Flux> scan(H key, ScanOptions options) { @Override public Mono> expire(H key, Duration timeout, Collection hashKeys) { + return expire(key, Expiration.from(timeout), FieldExpirationOptions.none(), hashKeys); + } + + @Override + public Mono> expire(H key, Expiration expiration, FieldExpirationOptions options, Collection hashKeys) { List orderedKeys = List.copyOf(hashKeys); ByteBuffer rawKey = 
rawKey(key); List rawHashKeys = orderedKeys.stream().map(this::rawHashKey).toList(); - Mono> raw = createFlux(connection -> { - - if (TimeoutUtils.hasMillis(timeout)) { - return connection.hpExpire(rawKey, timeout, rawHashKeys); - } - - return connection.hExpire(rawKey, timeout, rawHashKeys); + Mono> raw =createFlux(connection -> { + return connection.expireHashField(Mono.just(ExpireCommand.expire(rawHashKeys, expiration).from(rawKey).withOptions(options))).map(NumericResponse::getOutput); }).collectList(); return raw.map(values -> ExpireChanges.of(orderedKeys, values)); diff --git a/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java b/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java index 5bac3e9d9e..2d0cbcaf10 100644 --- a/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java @@ -15,6 +15,8 @@ */ package org.springframework.data.redis.core; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; +import org.springframework.data.redis.core.types.Expiration; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; @@ -237,6 +239,8 @@ default Flux> scan(H key) { Mono> expire(H key, Duration timeout, Collection hashKeys); + Mono> expire(H key, Expiration expiration, FieldExpirationOptions options, Collection hashKeys); + /** * Set the expiration for given {@code hashKey} (aka field) as a {@literal date} timestamp. 
* diff --git a/src/main/java/org/springframework/data/redis/core/types/Expiration.java b/src/main/java/org/springframework/data/redis/core/types/Expiration.java index 74eb9c3838..a68dd516b8 100644 --- a/src/main/java/org/springframework/data/redis/core/types/Expiration.java +++ b/src/main/java/org/springframework/data/redis/core/types/Expiration.java @@ -19,6 +19,7 @@ import java.util.Objects; import java.util.concurrent.TimeUnit; +import org.springframework.data.redis.core.TimeoutUtils; import org.springframework.lang.Nullable; import org.springframework.util.Assert; import org.springframework.util.ObjectUtils; @@ -105,8 +106,8 @@ public static Expiration from(Duration duration) { Assert.notNull(duration, "Duration must not be null"); return duration.isZero() ? Expiration.persistent() - : duration.toMillis() % 1000 == 0 ? new Expiration(duration.getSeconds(), TimeUnit.SECONDS) - : new Expiration(duration.toMillis(), TimeUnit.MILLISECONDS); + : TimeoutUtils.hasMillis(duration) ? new Expiration(duration.toMillis(), TimeUnit.MILLISECONDS) + : new Expiration(duration.getSeconds(), TimeUnit.SECONDS); } /** diff --git a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java index db3d57e903..4abd23dacf 100644 --- a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java @@ -322,6 +322,7 @@ void testExpireWithOptionsNone() { } @ParameterizedRedisTest + @EnabledOnCommand("HEXPIRE") void testExpireWithOptions() { K key = keyFactory.instance(); diff --git a/src/test/java/org/springframework/data/redis/core/DefaultReactiveHashOperationsIntegrationTests.java b/src/test/java/org/springframework/data/redis/core/DefaultReactiveHashOperationsIntegrationTests.java index fc73f9865e..a128a29293 100644 --- 
a/src/test/java/org/springframework/data/redis/core/DefaultReactiveHashOperationsIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/core/DefaultReactiveHashOperationsIntegrationTests.java @@ -19,6 +19,7 @@ import static org.assertj.core.api.Assumptions.assumeThat; import static org.junit.jupiter.api.condition.OS.MAC; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import reactor.test.StepVerifier; import java.time.Duration; @@ -529,6 +530,34 @@ void testExpireAndGetExpireMillis() { }).verifyComplete(); } + @ParameterizedRedisTest + @EnabledOnCommand("HEXPIRE") + void testExpireWithOptions() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + + putAll(key, key1, val1, key2, val2); + + hashOperations.expire(key, org.springframework.data.redis.core.types.Expiration.seconds(20), FieldExpirationOptions.none(), List.of(key1)).as(StepVerifier::create)// + .assertNext(changes -> { + assertThat(changes.allOk()).isTrue(); + }).verifyComplete(); + hashOperations.expire(key, org.springframework.data.redis.core.types.Expiration.seconds(60), FieldExpirationOptions.none(), List.of(key2)).as(StepVerifier::create)// + .assertNext(changes -> { + assertThat(changes.allOk()).isTrue(); + }).verifyComplete(); + + hashOperations.expire(key, org.springframework.data.redis.core.types.Expiration.seconds(30), FieldExpirationOptions.builder().gt().build(), List.of(key1, key2)).as(StepVerifier::create)// + .assertNext(changes -> { + assertThat(changes.ok()).containsExactly(key1); + assertThat(changes.skipped()).containsExactly(key2); + }).verifyComplete(); + } + @ParameterizedRedisTest @EnabledOnCommand("HEXPIRE") void testExpireAndGetExpireSeconds() { From 7f39778dd4dc33f02a224b89c9614a19051165dd Mon Sep 17 00:00:00 2001 From: Mark Paluch Date: Tue, 11 Feb 2025 11:32:38 +0100 Subject: [PATCH 
07/13] Polishing. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduce command variants with Condition to avoid duplicate expireHashField(…) implementations. Move expireHashField to default method and rename it to applyExpiration(…). Rename Expiration to TimeToLive and methods to getTimeToLive(…). Fix since tags. --- .../DefaultStringRedisConnection.java | 57 ++-- .../connection/DefaultedRedisConnection.java | 52 +++- .../data/redis/connection/Hash.java | 48 ++-- .../connection/ReactiveHashCommands.java | 25 +- .../redis/connection/RedisHashCommands.java | 244 +++++++++++++----- .../connection/StringRedisConnection.java | 139 +++++++--- .../jedis/JedisClusterHashCommands.java | 83 +++--- .../connection/jedis/JedisHashCommands.java | 69 ++--- .../lettuce/LettuceHashCommands.java | 74 ++---- .../lettuce/LettuceReactiveHashCommands.java | 6 +- .../data/redis/core/BoundHashOperations.java | 62 ++--- .../redis/core/DefaultHashOperations.java | 21 +- .../core/DefaultReactiveHashOperations.java | 15 +- .../data/redis/core/Expirations.java | 128 ++++----- .../data/redis/core/ExpireChanges.java | 22 +- .../data/redis/core/HashOperations.java | 82 +++--- .../redis/core/ReactiveHashOperations.java | 28 +- .../data/redis/core/RedisOperations.java | 1 + .../support/collections/DefaultRedisMap.java | 19 +- .../redis/support/collections/RedisMap.java | 60 +++-- .../support/collections/RedisProperties.java | 20 +- ...DefaultHashOperationsIntegrationTests.java | 25 +- ...eactiveHashOperationsIntegrationTests.java | 17 +- .../data/redis/core/ExpirationsUnitTest.java | 5 +- .../AbstractRedisMapIntegrationTests.java | 16 +- 25 files changed, 797 insertions(+), 521 deletions(-) diff --git a/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java b/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java index 00a504a3ac..17dc41378d 100644 --- 
a/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java +++ b/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java @@ -23,6 +23,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; + import org.springframework.core.convert.converter.Converter; import org.springframework.data.geo.Circle; import org.springframework.data.geo.Distance; @@ -35,10 +36,19 @@ import org.springframework.data.redis.connection.convert.ListConverter; import org.springframework.data.redis.connection.convert.MapConverter; import org.springframework.data.redis.connection.convert.SetConverter; -import org.springframework.data.redis.connection.stream.*; +import org.springframework.data.redis.connection.stream.ByteRecord; +import org.springframework.data.redis.connection.stream.Consumer; +import org.springframework.data.redis.connection.stream.MapRecord; +import org.springframework.data.redis.connection.stream.PendingMessages; +import org.springframework.data.redis.connection.stream.PendingMessagesSummary; +import org.springframework.data.redis.connection.stream.ReadOffset; +import org.springframework.data.redis.connection.stream.RecordId; import org.springframework.data.redis.connection.stream.StreamInfo.XInfoConsumers; import org.springframework.data.redis.connection.stream.StreamInfo.XInfoGroups; import org.springframework.data.redis.connection.stream.StreamInfo.XInfoStream; +import org.springframework.data.redis.connection.stream.StreamOffset; +import org.springframework.data.redis.connection.stream.StreamReadOptions; +import org.springframework.data.redis.connection.stream.StringRecord; import org.springframework.data.redis.connection.zset.Aggregate; import org.springframework.data.redis.connection.zset.DefaultTuple; import org.springframework.data.redis.connection.zset.Tuple; @@ -2561,35 +2571,36 @@ public Cursor> hScan(byte[] key, ScanOptions options) { return this.delegate.hScan(key, 
options); } - @Nullable @Override public Long hStrLen(byte[] key, byte[] field) { return convertAndReturn(delegate.hStrLen(key, field), Converters.identityConverter()); } - public @Nullable List expireHashField(byte[] key, org.springframework.data.redis.core.types.Expiration expiration, + public @Nullable List applyExpiration(byte[] key, + org.springframework.data.redis.core.types.Expiration expiration, FieldExpirationOptions options, byte[]... fields) { - return this.delegate.expireHashField(key, expiration, options, fields); + return this.delegate.applyExpiration(key, expiration, options, fields); } @Override - public List hExpire(byte[] key, long seconds, byte[]... fields) { - return this.delegate.hExpire(key, seconds, fields); + public List hExpire(byte[] key, long seconds, FieldExpirationOptions.Condition condition, byte[]... fields) { + return this.delegate.hExpire(key, seconds, condition, fields); } @Override - public List hpExpire(byte[] key, long millis, byte[]... fields) { - return this.delegate.hpExpire(key, millis, fields); + public List hpExpire(byte[] key, long millis, FieldExpirationOptions.Condition condition, byte[]... fields) { + return this.delegate.hpExpire(key, millis, condition, fields); } @Override - public List hExpireAt(byte[] key, long unixTime, byte[]... fields) { - return this.delegate.hExpireAt(key, unixTime, fields); + public List hExpireAt(byte[] key, long unixTime, FieldExpirationOptions.Condition condition, byte[]... fields) { + return this.delegate.hExpireAt(key, unixTime, condition, fields); } @Override - public List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... fields) { - return this.delegate.hpExpireAt(key, unixTimeInMillis, fields); + public List hpExpireAt(byte[] key, long unixTimeInMillis, FieldExpirationOptions.Condition condition, + byte[]... 
fields) { + return this.delegate.hpExpireAt(key, unixTimeInMillis, condition, fields); } @Override @@ -2612,29 +2623,31 @@ public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) { return this.delegate.hTtl(key, timeUnit, fields); } - public @Nullable List expireHashField(String key, org.springframework.data.redis.core.types.Expiration expiration, + public @Nullable List applyExpiration(String key, + org.springframework.data.redis.core.types.Expiration expiration, FieldExpirationOptions options, String... fields) { - return expireHashField(serialize(key), expiration, options, serializeMulti(fields)); + return applyExpiration(serialize(key), expiration, options, serializeMulti(fields)); } @Override - public List hExpire(String key, long seconds, String... fields) { - return hExpire(serialize(key), seconds, serializeMulti(fields)); + public List hExpire(String key, long seconds, FieldExpirationOptions.Condition condition, String... fields) { + return hExpire(serialize(key), seconds, condition, serializeMulti(fields)); } @Override - public List hpExpire(String key, long millis, String... fields) { - return hpExpire(serialize(key), millis, serializeMulti(fields)); + public List hpExpire(String key, long millis, FieldExpirationOptions.Condition condition, String... fields) { + return hpExpire(serialize(key), millis, condition, serializeMulti(fields)); } @Override - public List hExpireAt(String key, long unixTime, String... fields) { - return hExpireAt(serialize(key), unixTime, serializeMulti(fields)); + public List hExpireAt(String key, long unixTime, FieldExpirationOptions.Condition condition, String... fields) { + return hExpireAt(serialize(key), unixTime, condition, serializeMulti(fields)); } @Override - public List hpExpireAt(String key, long unixTimeInMillis, String... 
fields) { - return hpExpireAt(serialize(key), unixTimeInMillis, serializeMulti(fields)); + public List hpExpireAt(String key, long unixTimeInMillis, FieldExpirationOptions.Condition condition, + String... fields) { + return hpExpireAt(serialize(key), unixTimeInMillis, condition, serializeMulti(fields)); } @Override diff --git a/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java b/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java index 5c1befad36..460d883b41 100644 --- a/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java +++ b/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java @@ -1476,28 +1476,58 @@ default Long hStrLen(byte[] key, byte[] field) { @Override @Deprecated default List hExpire(byte[] key, long seconds, byte[]... fields) { - return hashCommands().hExpire(key, seconds, fields); + return hashCommands().hExpire(key, seconds, FieldExpirationOptions.Condition.ALWAYS, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hExpire(byte[] key, long seconds, FieldExpirationOptions.Condition condition, byte[]... fields) { + return hashCommands().hExpire(key, seconds, condition, fields); } /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ @Override @Deprecated default List hpExpire(byte[] key, long millis, byte[]... fields) { - return hashCommands().hpExpire(key, millis, fields); + return hashCommands().hpExpire(key, millis, FieldExpirationOptions.Condition.ALWAYS, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hpExpire(byte[] key, long millis, FieldExpirationOptions.Condition condition, byte[]... fields) { + return hashCommands().hpExpire(key, millis, condition, fields); } /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. 
*/ @Override @Deprecated default List hExpireAt(byte[] key, long unixTime, byte[]... fields) { - return hashCommands().hExpireAt(key, unixTime, fields); + return hashCommands().hExpireAt(key, unixTime, FieldExpirationOptions.Condition.ALWAYS, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hExpireAt(byte[] key, long unixTime, FieldExpirationOptions.Condition condition, + byte[]... fields) { + return hashCommands().hExpireAt(key, unixTime, condition, fields); } /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ @Override @Deprecated default List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... fields) { - return hashCommands().hpExpireAt(key, unixTimeInMillis, fields); + return hashCommands().hpExpireAt(key, unixTimeInMillis, FieldExpirationOptions.Condition.ALWAYS, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hpExpireAt(byte[] key, long unixTimeInMillis, FieldExpirationOptions.Condition condition, + byte[]... fields) { + return hashCommands().hpExpireAt(key, unixTimeInMillis, condition, fields); } /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ @@ -1531,9 +1561,10 @@ default List hpTtl(byte[] key, byte[]... fields) { /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ @Override @Deprecated - default @Nullable List expireHashField(byte[] key, org.springframework.data.redis.core.types.Expiration expiration, - FieldExpirationOptions options, byte[]... fields) { - return hashCommands().expireHashField(key, expiration, options, fields); + default @Nullable List applyExpiration(byte[] key, + org.springframework.data.redis.core.types.Expiration expiration, FieldExpirationOptions options, + byte[]... 
fields) { + return hashCommands().applyExpiration(key, expiration, options, fields); } // GEO COMMANDS @@ -1907,9 +1938,8 @@ default T evalSha(byte[] scriptSha, ReturnType returnType, int numKeys, byte /** @deprecated in favor of {@link RedisConnection#zSetCommands()}}. */ @Override @Deprecated - default Long zRangeStoreByLex(byte[] dstKey, byte[] srcKey, - org.springframework.data.domain.Range range, - org.springframework.data.redis.connection.Limit limit) { + default Long zRangeStoreByLex(byte[] dstKey, byte[] srcKey, org.springframework.data.domain.Range range, + org.springframework.data.redis.connection.Limit limit) { return zSetCommands().zRangeStoreByLex(dstKey, srcKey, range, limit); } @@ -1926,7 +1956,7 @@ default Long zRangeStoreRevByLex(byte[] dstKey, byte[] srcKey, org.springframewo @Deprecated default Long zRangeStoreByScore(byte[] dstKey, byte[] srcKey, org.springframework.data.domain.Range range, - org.springframework.data.redis.connection.Limit limit) { + org.springframework.data.redis.connection.Limit limit) { return zSetCommands().zRangeStoreByScore(dstKey, srcKey, range, limit); } diff --git a/src/main/java/org/springframework/data/redis/connection/Hash.java b/src/main/java/org/springframework/data/redis/connection/Hash.java index c8d0e8a248..dcf38d99e5 100644 --- a/src/main/java/org/springframework/data/redis/connection/Hash.java +++ b/src/main/java/org/springframework/data/redis/connection/Hash.java @@ -18,7 +18,6 @@ import java.util.Objects; import org.springframework.lang.Contract; -import org.springframework.lang.Nullable; import org.springframework.util.ObjectUtils; /** @@ -29,10 +28,10 @@ public interface Hash { class FieldExpirationOptions { - private static final FieldExpirationOptions NONE = new FieldExpirationOptions(null); - private @Nullable Condition condition; + private static final FieldExpirationOptions NONE = new FieldExpirationOptions(Condition.ALWAYS); + private final Condition condition; - FieldExpirationOptions(@Nullable 
Condition condition) { + FieldExpirationOptions(Condition condition) { this.condition = condition; } @@ -40,12 +39,11 @@ public static FieldExpirationOptions none() { return NONE; } - @Contract("_ -> new") public static FieldExpireOptionsBuilder builder() { return new FieldExpireOptionsBuilder(); } - public @Nullable Condition getCondition() { + public Condition getCondition() { return condition; } @@ -68,47 +66,63 @@ public int hashCode() { public static class FieldExpireOptionsBuilder { - @Nullable Condition condition; + private Condition condition = Condition.ALWAYS; - @Contract("_ -> this") + @Contract("-> this") public FieldExpireOptionsBuilder nx() { this.condition = Condition.NX; return this; } - @Contract("_ -> this") + @Contract("-> this") public FieldExpireOptionsBuilder xx() { this.condition = Condition.XX; return this; } - @Contract("_ -> this") + @Contract("-> this") public FieldExpireOptionsBuilder gt() { this.condition = Condition.GT; return this; } - @Contract("_ -> this") + @Contract("-> this") public FieldExpireOptionsBuilder lt() { this.condition = Condition.LT; return this; } - @Contract("_ -> !null") public FieldExpirationOptions build() { - return condition == null ? NONE : new FieldExpirationOptions(condition); + return condition == Condition.ALWAYS ? NONE : new FieldExpirationOptions(condition); } + } public enum Condition { - /** Set expiration only when the field has no expiration. */ + /** + * Always apply expiration. + */ + ALWAYS, + + /** + * Set expiration only when the field has no expiration. + */ NX, - /** Set expiration only when the field has an existing expiration. */ + + /** + * Set expiration only when the field has an existing expiration. + */ XX, - /** Set expiration only when the new expiration is greater than current one. */ + + /** + * Set expiration only when the new expiration is greater than current one. + */ GT, - /** Set expiration only when the new expiration is greater than current one. 
*/ + + /** + * Set expiration only when the new expiration is less than current one. + */ LT } } diff --git a/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java b/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java index f58f4e32a9..1e9fd94f3a 100644 --- a/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java @@ -858,7 +858,9 @@ class ExpireCommand extends HashFieldsCommand { private ExpireCommand(@Nullable ByteBuffer key, List fields, Expiration expiration, FieldExpirationOptions options) { + super(key, fields); + this.expiration = expiration; this.options = options; } @@ -921,6 +923,7 @@ public FieldExpirationOptions getOptions() { default Mono hExpire(ByteBuffer key, Duration duration, ByteBuffer field) { Assert.notNull(duration, "Duration must not be null"); + return hExpire(key, duration, Collections.singletonList(field)).singleOrEmpty(); } @@ -939,9 +942,10 @@ default Mono hExpire(ByteBuffer key, Duration duration, ByteBuffer field) * @since 3.5 */ default Flux hExpire(ByteBuffer key, Duration duration, List fields) { + Assert.notNull(duration, "Duration must not be null"); - return expireHashField(Flux.just(ExpireCommand.expire(fields, duration).from(key))) + return applyExpiration(Flux.just(ExpireCommand.expire(fields, duration).from(key))) .mapNotNull(NumericResponse::getOutput); } @@ -957,7 +961,7 @@ default Flux hExpire(ByteBuffer key, Duration duration, List f * @since 3.5 * @see Redis Documentation: HEXPIRE */ - Flux> expireHashField(Publisher commands); + Flux> applyExpiration(Publisher commands); /** * Expire a given {@literal field} after a given {@link Duration} of time, measured in milliseconds, has passed. 
@@ -975,6 +979,7 @@ default Flux hExpire(ByteBuffer key, Duration duration, List f default Mono hpExpire(ByteBuffer key, Duration duration, ByteBuffer field) { Assert.notNull(duration, "Duration must not be null"); + return hpExpire(key, duration, Collections.singletonList(field)).singleOrEmpty(); } @@ -995,7 +1000,8 @@ default Mono hpExpire(ByteBuffer key, Duration duration, ByteBuffer field) default Flux hpExpire(ByteBuffer key, Duration duration, List fields) { Assert.notNull(duration, "Duration must not be null"); - return expireHashField(Flux.just(new ExpireCommand(key, fields, + + return applyExpiration(Flux.just(new ExpireCommand(key, fields, Expiration.from(duration.toMillis(), TimeUnit.MILLISECONDS), FieldExpirationOptions.none()))) .mapNotNull(NumericResponse::getOutput); } @@ -1017,6 +1023,7 @@ default Flux hpExpire(ByteBuffer key, Duration duration, List default Mono hExpireAt(ByteBuffer key, Instant expireAt, ByteBuffer field) { Assert.notNull(expireAt, "Duration must not be null"); + return hExpireAt(key, expireAt, Collections.singletonList(field)).singleOrEmpty(); } @@ -1035,9 +1042,10 @@ default Mono hExpireAt(ByteBuffer key, Instant expireAt, ByteBuffer field) * @since 3.5 */ default Flux hExpireAt(ByteBuffer key, Instant expireAt, List fields) { + Assert.notNull(expireAt, "Duration must not be null"); - return expireHashField(Flux.just(ExpireCommand.expireAt(fields, expireAt, TimeUnit.SECONDS).from(key))) + return applyExpiration(Flux.just(ExpireCommand.expireAt(fields, expireAt, TimeUnit.SECONDS).from(key))) .mapNotNull(NumericResponse::getOutput); } @@ -1058,6 +1066,7 @@ default Flux hExpireAt(ByteBuffer key, Instant expireAt, List default Mono hpExpireAt(ByteBuffer key, Instant expireAt, ByteBuffer field) { Assert.notNull(expireAt, "Duration must not be null"); + return hpExpireAt(key, expireAt, Collections.singletonList(field)).singleOrEmpty(); } @@ -1076,9 +1085,10 @@ default Mono hpExpireAt(ByteBuffer key, Instant expireAt, ByteBuffer field 
* @since 3.5 */ default Flux hpExpireAt(ByteBuffer key, Instant expireAt, List fields) { + Assert.notNull(expireAt, "Duration must not be null"); - return expireHashField(Flux.just(ExpireCommand.expireAt(fields, expireAt, TimeUnit.MILLISECONDS).from(key))) + return applyExpiration(Flux.just(ExpireCommand.expireAt(fields, expireAt, TimeUnit.MILLISECONDS).from(key))) .mapNotNull(NumericResponse::getOutput); } @@ -1135,7 +1145,6 @@ default Flux hPersist(ByteBuffer key, List fields) { * @since 3.5 */ default Mono hTtl(ByteBuffer key, ByteBuffer field) { - return hTtl(key, Collections.singletonList(field)).singleOrEmpty(); } @@ -1151,7 +1160,6 @@ default Mono hTtl(ByteBuffer key, ByteBuffer field) { * @since 3.5 */ default Flux hTtl(ByteBuffer key, List fields) { - return hTtl(Flux.just(new HashFieldsCommand(key, fields))).mapNotNull(NumericResponse::getOutput); } @@ -1179,7 +1187,6 @@ default Flux hTtl(ByteBuffer key, List fields) { * @since 3.5 */ default Mono hpTtl(ByteBuffer key, ByteBuffer field) { - return hpTtl(key, Collections.singletonList(field)).singleOrEmpty(); } @@ -1195,7 +1202,6 @@ default Mono hpTtl(ByteBuffer key, ByteBuffer field) { * @since 3.5 */ default Flux hpTtl(ByteBuffer key, List fields) { - return hpTtl(Flux.just(new HashFieldsCommand(key, fields))).mapNotNull(NumericResponse::getOutput); } @@ -1210,4 +1216,5 @@ default Flux hpTtl(ByteBuffer key, List fields) { * @see Redis Documentation: HPTTL */ Flux> hpTtl(Publisher commands); + } diff --git a/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java b/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java index 5fde9d5db3..f2d736b8ef 100644 --- a/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java @@ -25,6 +25,7 @@ import org.springframework.data.redis.core.Cursor; import org.springframework.data.redis.core.ScanOptions; import 
org.springframework.lang.Nullable; +import org.springframework.util.ObjectUtils; /** * Hash-specific commands supported by Redis. @@ -254,41 +255,78 @@ public interface RedisHashCommands { @Nullable Long hStrLen(byte[] key, byte[] field); - default @Nullable List expireHashField(byte[] key, org.springframework.data.redis.core.types.Expiration expiration, - byte[]... fields) { - return expireHashField(key, expiration, FieldExpirationOptions.none(), fields); + default @Nullable List applyExpiration(byte[] key, + org.springframework.data.redis.core.types.Expiration expiration, byte[]... fields) { + return applyExpiration(key, expiration, FieldExpirationOptions.none(), fields); } + @Nullable + default List applyExpiration(byte[] key, org.springframework.data.redis.core.types.Expiration expiration, + FieldExpirationOptions options, byte[]... fields) { + + if (expiration.isPersistent()) { + return hPersist(key, fields); + } + + if (ObjectUtils.nullSafeEquals(FieldExpirationOptions.none(), options)) { + if (ObjectUtils.nullSafeEquals(TimeUnit.MILLISECONDS, expiration.getTimeUnit())) { + if (expiration.isUnixTimestamp()) { + return hpExpireAt(key, expiration.getExpirationTimeInMilliseconds(), fields); + } + return hpExpire(key, expiration.getExpirationTimeInMilliseconds(), fields); + } + if (expiration.isUnixTimestamp()) { + return hExpireAt(key, expiration.getExpirationTimeInSeconds(), fields); + } + return hExpire(key, expiration.getExpirationTimeInSeconds(), fields); + } + + if (ObjectUtils.nullSafeEquals(TimeUnit.MILLISECONDS, expiration.getTimeUnit())) { + if (expiration.isUnixTimestamp()) { + return hpExpireAt(key, expiration.getExpirationTimeInMilliseconds(), options.getCondition(), fields); + } + + return hpExpire(key, expiration.getExpirationTimeInMilliseconds(), options.getCondition(), fields); + } + + if (expiration.isUnixTimestamp()) { + return hExpireAt(key, expiration.getExpirationTimeInSeconds(), options.getCondition(), fields); + } + + return hExpire(key, 
expiration.getExpirationTimeInSeconds(), options.getCondition(), fields); + } - @Nullable List expireHashField(byte[] key, org.springframework.data.redis.core.types.Expiration expiration, - FieldExpirationOptions options, byte[]... fields); - - /** - * Set time to live for given {@code fields} in seconds. - * - * @param key must not be {@literal null}. - * @param seconds the amount of time after which the fields will be expired in seconds, must not be {@literal null}. - * @param fields must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; - * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); - * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. - * @see Redis Documentation: HEXPIRE - * @since 3.5 - */ + /** + * Set time to live for given {@code fields} in seconds. + * + * @param key must not be {@literal null}. + * @param seconds the amount of time after which the fields will be expired in seconds, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / + * transaction. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ @Nullable - List hExpire(byte[] key, long seconds, byte[]... fields); + default List hExpire(byte[] key, long seconds, byte[]... 
fields) { + return hExpire(key, seconds, FieldExpirationOptions.Condition.ALWAYS, fields); + } /** * Set time to live for given {@code fields}. * * @param key must not be {@literal null}. - * @param ttl the amount of time after which the fields will be expired in {@link Duration#toSeconds() seconds} precision, must not be {@literal null}. + * @param ttl the amount of time after which the fields will be expired in {@link Duration#toSeconds() seconds} + * precision, must not be {@literal null}. * @param fields must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; - * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); - * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / + * transaction. * @see Redis Documentation: HEXPIRE * @since 3.5 */ @@ -297,32 +335,56 @@ default List hExpire(byte[] key, Duration ttl, byte[]... fields) { return hExpire(key, ttl.toSeconds(), fields); } + /** + * Set time to live for given {@code fields} in seconds. + * + * @param key must not be {@literal null}. + * @param seconds the amount of time after which the fields will be expired in seconds, must not be {@literal null}. + * @param fields must not be {@literal null}. 
+ * @param condition the condition for expiration, must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / + * transaction. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + List hExpire(byte[] key, long seconds, FieldExpirationOptions.Condition condition, byte[]... fields); + /** * Set time to live for given {@code fields} in milliseconds. * * @param key must not be {@literal null}. - * @param millis the amount of time after which the fields will be expired in milliseconds, must not be {@literal null}. + * @param millis the amount of time after which the fields will be expired in milliseconds, must not be + * {@literal null}. * @param fields must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; - * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); - * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. 
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / + * transaction. * @see Redis Documentation: HPEXPIRE * @since 3.5 */ @Nullable - List hpExpire(byte[] key, long millis, byte[]... fields); + default List hpExpire(byte[] key, long millis, byte[]... fields) { + return hpExpire(key, millis, FieldExpirationOptions.Condition.ALWAYS, fields); + } /** * Set time to live for given {@code fields} in milliseconds. * * @param key must not be {@literal null}. - * @param ttl the amount of time after which the fields will be expired in {@link Duration#toMillis() milliseconds} precision, must not be {@literal null}. + * @param ttl the amount of time after which the fields will be expired in {@link Duration#toMillis() milliseconds} + * precision, must not be {@literal null}. * @param fields must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; - * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); - * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. 
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / + * transaction. * @see Redis Documentation: HPEXPIRE * @since 3.5 */ @@ -331,46 +393,109 @@ default List hpExpire(byte[] key, Duration ttl, byte[]... fields) { return hpExpire(key, ttl.toMillis(), fields); } + /** + * Set time to live for given {@code fields} in milliseconds. + * + * @param key must not be {@literal null}. + * @param millis the amount of time after which the fields will be expired in milliseconds, must not be + * {@literal null}. + * @param condition the condition for expiration, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / + * transaction. + * @see Redis Documentation: HPEXPIRE + * @since 3.5 + */ + @Nullable + List hpExpire(byte[] key, long millis, FieldExpirationOptions.Condition condition, byte[]... fields); + /** * Set the expiration for given {@code field} as a {@literal UNIX} timestamp. * * @param key must not be {@literal null}. * @param unixTime the moment in time in which the field expires, must not be {@literal null}. * @param fields must not be {@literal null}. 
- * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is - * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not - * met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | + * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in + * pipeline / transaction. * @see Redis Documentation: HEXPIREAT * @since 3.5 */ @Nullable - List hExpireAt(byte[] key, long unixTime, byte[]... fields); + default List hExpireAt(byte[] key, long unixTime, byte[]... fields) { + return hExpireAt(key, unixTime, FieldExpirationOptions.Condition.ALWAYS, fields); + } + + /** + * Set the expiration for given {@code field} as a {@literal UNIX} timestamp. + * + * @param key must not be {@literal null}. + * @param unixTime the moment in time in which the field expires, must not be {@literal null}. + * @param condition the condition for expiration, must not be {@literal null}. + * @param fields must not be {@literal null}. 
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | + * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in + * pipeline / transaction. + * @see Redis Documentation: HEXPIREAT + * @since 3.5 + */ + @Nullable + List hExpireAt(byte[] key, long unixTime, FieldExpirationOptions.Condition condition, byte[]... fields); + + /** + * Set the expiration for given {@code field} as a {@literal UNIX} timestamp in milliseconds. + * + * @param key must not be {@literal null}. + * @param unixTimeInMillis the moment in time in which the field expires in milliseconds, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | + * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in + * pipeline / transaction. + * @see Redis Documentation: HPEXPIREAT + * @since 3.5 + */ + @Nullable + default List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... fields) { + return hpExpireAt(key, unixTimeInMillis, FieldExpirationOptions.Condition.ALWAYS, fields); + } /** * Set the expiration for given {@code field} as a {@literal UNIX} timestamp in milliseconds. * * @param key must not be {@literal null}. * @param unixTimeInMillis the moment in time in which the field expires in milliseconds, must not be {@literal null}. 
+ * @param condition the condition for expiration, must not be {@literal null}. * @param fields must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is - * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not - * met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | + * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in + * pipeline / transaction. * @see Redis Documentation: HPEXPIREAT * @since 3.5 */ @Nullable - List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... fields); + List hpExpireAt(byte[] key, long unixTimeInMillis, FieldExpirationOptions.Condition condition, + byte[]... fields); /** * Remove the expiration from given {@code field}. * * @param key must not be {@literal null}. * @param fields must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is removed; - * {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field; - * {@literal null} when used in pipeline / transaction.{@literal null} when used in pipeline / transaction. 
+ * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is + * removed; {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such + * field; {@literal null} when used in pipeline / + * transaction. * @see Redis Documentation: HPERSIST * @since 3.5 */ @@ -382,9 +507,10 @@ default List hpExpire(byte[] key, Duration ttl, byte[]... fields) { * * @param key must not be {@literal null}. * @param fields must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative value - * to signal an error. The command returns {@code -1} if the field exists but has no associated expiration time. - * The command returns {@code -2} if the field does not exist; {@literal null} when used in pipeline / transaction. + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative + * value to signal an error. The command returns {@code -1} if the field exists but has no associated + * expiration time. The command returns {@code -2} if the field does not exist; {@literal null} when used in + * pipeline / transaction. * @see Redis Documentation: HTTL * @since 3.5 */ @@ -397,9 +523,10 @@ default List hpExpire(byte[] key, Duration ttl, byte[]... fields) { * @param key must not be {@literal null}. * @param timeUnit must not be {@literal null}. * @param fields must not be {@literal null}. - * @return for each of the fields supplied - the time to live in the {@link TimeUnit} provided; or a negative value - * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. - * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction.
+ * @return for each of the fields supplied - the time to live in the {@link TimeUnit} provided; or a negative value to + * signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / + * transaction. * @see Redis Documentation: HTTL * @since 3.5 */ @@ -413,9 +540,10 @@ default List hpExpire(byte[] key, Duration ttl, byte[]... fields) { * * @param key must not be {@literal null}. * @param fields must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative value - * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. - * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative + * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration + * time. The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / + * transaction. 
* @see Redis Documentation: HTTL * @since 3.5 */ diff --git a/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java b/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java index ed0101641e..1a4ca013d7 100644 --- a/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java +++ b/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java @@ -2333,22 +2333,41 @@ Long zRangeStoreRevByScore(String dstKey, String srcKey, @Nullable Long hStrLen(String key, String field); - // TODO: why why whay is this such a shitty api that there's missing all the NX, XX, GT Options /** * Set time to live for given {@code field} in seconds. * * @param key must not be {@literal null}. * @param seconds the amount of time after which the key will be expired in seconds, must not be {@literal null}. * @param fields must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; - * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); - * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / + * transaction. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + default List hExpire(String key, long seconds, String... 
fields) { + return hExpire(key, seconds, Hash.FieldExpirationOptions.Condition.ALWAYS, fields); + } + + /** + * Set time to live for given {@code field} in seconds. + * + * @param key must not be {@literal null}. + * @param seconds the amount of time after which the key will be expired in seconds, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / + * transaction. * @see Redis Documentation: HEXPIRE - * @since 3.4 + * @since 3.5 */ @Nullable - List hExpire(String key, long seconds, String... fields); + List hExpire(String key, long seconds, Hash.FieldExpirationOptions.Condition condition, String... fields); /** * Set time to live for given {@code field} in milliseconds. @@ -2356,15 +2375,54 @@ Long zRangeStoreRevByScore(String dstKey, String srcKey, * @param key must not be {@literal null}. * @param millis the amount of time after which the key will be expired in milliseconds, must not be {@literal null}. * @param fields must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; - * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); - * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. 
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / + * transaction. * @see Redis Documentation: HPEXPIRE - * @since 3.4 + * @since 3.5 + */ + @Nullable + default List hpExpire(String key, long millis, String... fields) { + return hpExpire(key, millis, Hash.FieldExpirationOptions.Condition.ALWAYS, fields); + } + + /** + * Set time to live for given {@code field} in milliseconds. + * + * @param key must not be {@literal null}. + * @param millis the amount of time after which the key will be expired in milliseconds, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / + * transaction. + * @see Redis Documentation: HPEXPIRE + * @since 3.5 + */ + @Nullable + List hpExpire(String key, long millis, Hash.FieldExpirationOptions.Condition condition, String... fields); + + /** + * Set the expiration for given {@code field} as a {@literal UNIX} timestamp. + * + * @param key must not be {@literal null}. + * @param unixTime the moment in time in which the field expires, must not be {@literal null}. + * @param fields must not be {@literal null}. 
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | + * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in + * pipeline / transaction. + * @see Redis Documentation: HEXPIREAT + * @since 3.5 */ @Nullable - List hpExpire(String key, long millis, String... fields); + default List hExpireAt(String key, long unixTime, String... fields) { + return hExpireAt(key, unixTime, Hash.FieldExpirationOptions.Condition.ALWAYS, fields); + } /** * Set the expiration for given {@code field} as a {@literal UNIX} timestamp. @@ -2372,15 +2430,16 @@ Long zRangeStoreRevByScore(String dstKey, String srcKey, * @param key must not be {@literal null}. * @param unixTime the moment in time in which the field expires, must not be {@literal null}. * @param fields must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is - * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not - * met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. 
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | + * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in + * pipeline / transaction. * @see Redis Documentation: HEXPIREAT - * @since 3.4 + * @since 3.5 */ @Nullable - List hExpireAt(String key, long unixTime, String... fields); + List hExpireAt(String key, long unixTime, Hash.FieldExpirationOptions.Condition condition, String... fields); /** * Set the expiration for given {@code field} as a {@literal UNIX} timestamp in milliseconds. @@ -2388,26 +2447,48 @@ Long zRangeStoreRevByScore(String dstKey, String srcKey, * @param key must not be {@literal null}. * @param unixTimeInMillis the moment in time in which the field expires in milliseconds, must not be {@literal null}. * @param fields must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is - * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not - * met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. 
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | + * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in + * pipeline / transaction. * @see Redis Documentation: HPEXPIREAT - * @since 3.4 + * @since 3.5 */ @Nullable - List hpExpireAt(String key, long unixTimeInMillis, String... fields); + default List hpExpireAt(String key, long unixTimeInMillis, String... fields) { + return hpExpireAt(key, unixTimeInMillis, Hash.FieldExpirationOptions.Condition.ALWAYS, fields); + } + + /** + * Set the expiration for given {@code field} as a {@literal UNIX} timestamp in milliseconds. + * + * @param key must not be {@literal null}. + * @param unixTimeInMillis the moment in time in which the field expires in milliseconds, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | + * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in + * pipeline / transaction. + * @see Redis Documentation: HPEXPIREAT + * @since 3.5 + */ + @Nullable + List hpExpireAt(String key, long unixTimeInMillis, Hash.FieldExpirationOptions.Condition condition, + String... fields); /** * Remove the expiration from given {@code field}. * * @param key must not be {@literal null}. * @param fields must not be {@literal null}. 
-	 * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is removed;
-	 *         {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field;
-	 *         {@literal null} when used in pipeline / transaction.{@literal null} when used in pipeline / transaction.
+	 * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is
+	 *         removed; {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such
+	 *         field; {@literal null} when used in pipeline /
+	 *         transaction.
 	 * @see Redis Documentation: HPERSIST
-	 * @since 3.4
+	 * @since 3.5
 	 */
 	@Nullable
 	List hPersist(String key, String... fields);
 
diff --git a/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java b/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java
index 3326a00d62..1223ab4c06 100644
--- a/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java
+++ b/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java
@@ -33,10 +33,8 @@
 import org.springframework.data.redis.core.ScanCursor;
 import org.springframework.data.redis.core.ScanIteration;
 import org.springframework.data.redis.core.ScanOptions;
-import org.springframework.data.redis.core.types.Expiration;
 import org.springframework.lang.Nullable;
 import org.springframework.util.Assert;
-import org.springframework.util.ObjectUtils;
 
 /**
  * Cluster {@link RedisHashCommands} implementation for Jedis.
@@ -292,88 +290,71 @@ protected ScanIteration> doScan(CursorId cursorId, ScanOpt
 		}.open();
 	}
 
-	@Nullable
 	@Override
-	public List expireHashField(byte[] key, Expiration expiration, FieldExpirationOptions options,
-			byte[]...
fields) { - - if (expiration.isPersistent()) { - return hPersist(key, fields); - } - - if (ObjectUtils.nullSafeEquals(FieldExpirationOptions.none(), options)) { - if (ObjectUtils.nullSafeEquals(TimeUnit.MILLISECONDS, expiration.getTimeUnit())) { - if (expiration.isUnixTimestamp()) { - return hpExpireAt(key, expiration.getExpirationTimeInMilliseconds(), fields); - } - return hpExpire(key, expiration.getExpirationTimeInMilliseconds(), fields); - } - if (expiration.isUnixTimestamp()) { - return hExpireAt(key, expiration.getExpirationTimeInSeconds(), fields); - } - return hExpire(key, expiration.getExpirationTimeInSeconds(), fields); - } - - ExpiryOption option = ExpiryOption.valueOf(options.getCondition().name()); - - if (ObjectUtils.nullSafeEquals(TimeUnit.MILLISECONDS, expiration.getTimeUnit())) { - if (expiration.isUnixTimestamp()) { - return connection.getCluster().hpexpireAt(key, expiration.getExpirationTimeInMilliseconds(), option, fields); - } - return connection.getCluster().hpexpire(key, expiration.getExpirationTimeInMilliseconds(), option, fields); - } - - if (expiration.isUnixTimestamp()) { - return connection.getCluster().hexpireAt(key, expiration.getExpirationTimeInSeconds(), option, fields); - } - return connection.getCluster().hexpire(key, expiration.getExpirationTimeInSeconds(), option, fields); - - } - - @Override - public List hExpire(byte[] key, long seconds, byte[]... fields) { + public List hExpire(byte[] key, long seconds, FieldExpirationOptions.Condition condition, byte[]... 
fields) { Assert.notNull(key, "Key must not be null"); Assert.notNull(fields, "Fields must not be null"); try { - return connection.getCluster().hexpire(key, seconds, fields); + if (condition == FieldExpirationOptions.Condition.ALWAYS) { + return connection.getCluster().hexpire(key, seconds, fields); + } + + return connection.getCluster().hexpire(key, seconds, ExpiryOption.valueOf(condition.name()), fields); } catch (Exception ex) { throw convertJedisAccessException(ex); } } @Override - public List hpExpire(byte[] key, long millis, byte[]... fields) { + public List hpExpire(byte[] key, long millis, FieldExpirationOptions.Condition condition, byte[]... fields) { + Assert.notNull(key, "Key must not be null"); Assert.notNull(fields, "Fields must not be null"); try { - return connection.getCluster().hpexpire(key, millis, fields); + if (condition == FieldExpirationOptions.Condition.ALWAYS) { + return connection.getCluster().hpexpire(key, millis, fields); + } + + return connection.getCluster().hpexpire(key, millis, ExpiryOption.valueOf(condition.name()), fields); } catch (Exception ex) { throw convertJedisAccessException(ex); } } @Override - public List hExpireAt(byte[] key, long unixTime, byte[]... fields) { + public List hExpireAt(byte[] key, long unixTime, FieldExpirationOptions.Condition condition, byte[]... fields) { Assert.notNull(key, "Key must not be null"); Assert.notNull(fields, "Fields must not be null"); try { - return connection.getCluster().hexpireAt(key, unixTime, fields); + + if (condition == FieldExpirationOptions.Condition.ALWAYS) { + return connection.getCluster().hexpireAt(key, unixTime, fields); + } + + return connection.getCluster().hexpireAt(key, unixTime, ExpiryOption.valueOf(condition.name()), fields); } catch (Exception ex) { throw convertJedisAccessException(ex); } } @Override - public List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... 
fields) { + public List hpExpireAt(byte[] key, long unixTimeInMillis, FieldExpirationOptions.Condition condition, + byte[]... fields) { + Assert.notNull(key, "Key must not be null"); Assert.notNull(fields, "Fields must not be null"); try { - return connection.getCluster().hpexpireAt(key, unixTimeInMillis, fields); + + if (condition == FieldExpirationOptions.Condition.ALWAYS) { + return connection.getCluster().hpexpireAt(key, unixTimeInMillis, fields); + } + + return connection.getCluster().hpexpireAt(key, unixTimeInMillis, ExpiryOption.valueOf(condition.name()), fields); } catch (Exception ex) { throw convertJedisAccessException(ex); } @@ -381,6 +362,7 @@ public List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... fields @Override public List hPersist(byte[] key, byte[]... fields) { + Assert.notNull(key, "Key must not be null"); Assert.notNull(fields, "Fields must not be null"); @@ -393,6 +375,7 @@ public List hPersist(byte[] key, byte[]... fields) { @Override public List hTtl(byte[] key, byte[]... fields) { + Assert.notNull(key, "Key must not be null"); Assert.notNull(fields, "Fields must not be null"); @@ -405,6 +388,7 @@ public List hTtl(byte[] key, byte[]... fields) { @Override public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) { + Assert.notNull(key, "Key must not be null"); Assert.notNull(fields, "Fields must not be null"); @@ -418,6 +402,7 @@ public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) { @Override public List hpTtl(byte[] key, byte[]... 
fields) { + Assert.notNull(key, "Key must not be null"); Assert.notNull(fields, "Fields must not be null"); @@ -439,7 +424,7 @@ public Long hStrLen(byte[] key, byte[] field) { } private DataAccessException convertJedisAccessException(Exception ex) { - return connection.convertJedisAccessException(ex); } + } diff --git a/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java b/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java index e8751e85cb..2e83d8aba0 100644 --- a/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java @@ -39,7 +39,6 @@ import org.springframework.data.redis.core.ScanOptions; import org.springframework.lang.Nullable; import org.springframework.util.Assert; -import org.springframework.util.ObjectUtils; /** * {@link RedisHashCommands} implementation for Jedis. @@ -257,62 +256,48 @@ protected void doClose() { } @Override - public List hExpire(byte[] key, long seconds, byte[]... fields) { - return connection.invoke().just(Jedis::hexpire, PipelineBinaryCommands::hexpire, key, seconds, fields); - } + public List hExpire(byte[] key, long seconds, FieldExpirationOptions.Condition condition, byte[]... fields) { - @Override - public List hpExpire(byte[] key, long millis, byte[]... 
fields) { - return connection.invoke().just(Jedis::hpexpire, PipelineBinaryCommands::hpexpire, key, millis, fields); + if (condition == FieldExpirationOptions.Condition.ALWAYS) { + return connection.invoke().just(Jedis::hexpire, PipelineBinaryCommands::hexpire, key, seconds, fields); + } + + ExpiryOption option = ExpiryOption.valueOf(condition.name()); + return connection.invoke().just(Jedis::hexpire, PipelineBinaryCommands::hexpire, key, seconds, option, fields); } @Override - public @Nullable List expireHashField(byte[] key, org.springframework.data.redis.core.types.Expiration expiration, - FieldExpirationOptions options, byte[]... fields) { + public List hpExpire(byte[] key, long millis, FieldExpirationOptions.Condition condition, byte[]... fields) { - if (expiration.isPersistent()) { - return hPersist(key, fields); + if (condition == FieldExpirationOptions.Condition.ALWAYS) { + return connection.invoke().just(Jedis::hpexpire, PipelineBinaryCommands::hpexpire, key, millis, fields); } - if (ObjectUtils.nullSafeEquals(FieldExpirationOptions.none(), options)) { - if (ObjectUtils.nullSafeEquals(TimeUnit.MILLISECONDS, expiration.getTimeUnit())) { - if (expiration.isUnixTimestamp()) { - return hpExpireAt(key, expiration.getExpirationTimeInMilliseconds(), fields); - } - return hpExpire(key, expiration.getExpirationTimeInMilliseconds(), fields); - } - if (expiration.isUnixTimestamp()) { - return hExpireAt(key, expiration.getExpirationTimeInSeconds(), fields); - } - return hExpire(key, expiration.getExpirationTimeInSeconds(), fields); - } + ExpiryOption option = ExpiryOption.valueOf(condition.name()); + return connection.invoke().just(Jedis::hpexpire, PipelineBinaryCommands::hpexpire, key, millis, option, fields); + } - ExpiryOption option = ExpiryOption.valueOf(options.getCondition().name()); + @Override + public List hExpireAt(byte[] key, long unixTime, FieldExpirationOptions.Condition condition, byte[]... 
fields) { - if (ObjectUtils.nullSafeEquals(TimeUnit.MILLISECONDS, expiration.getTimeUnit())) { - if (expiration.isUnixTimestamp()) { - return connection.invoke().just(Jedis::hpexpireAt, PipelineBinaryCommands::hpexpireAt, key, - expiration.getExpirationTimeInMilliseconds(), option, fields); - } - return connection.invoke().just(Jedis::hpexpire, PipelineBinaryCommands::hpexpire, key, - expiration.getExpirationTimeInMilliseconds(), option, fields); + if (condition == FieldExpirationOptions.Condition.ALWAYS) { + return connection.invoke().just(Jedis::hexpireAt, PipelineBinaryCommands::hexpireAt, key, unixTime, fields); } - if (expiration.isUnixTimestamp()) { - return connection.invoke().just(Jedis::hexpireAt, PipelineBinaryCommands::hexpireAt, key, - expiration.getExpirationTimeInSeconds(), option, fields); - } - return connection.invoke().just(Jedis::hexpire, PipelineBinaryCommands::hexpire, key, - expiration.getExpirationTimeInSeconds(), option, fields); + ExpiryOption option = ExpiryOption.valueOf(condition.name()); + return connection.invoke().just(Jedis::hexpireAt, PipelineBinaryCommands::hexpireAt, key, unixTime, option, fields); } @Override - public List hExpireAt(byte[] key, long unixTime, byte[]... fields) { - return connection.invoke().just(Jedis::hexpireAt, PipelineBinaryCommands::hexpireAt, key, unixTime, fields); - } + public List hpExpireAt(byte[] key, long unixTimeInMillis, FieldExpirationOptions.Condition condition, + byte[]... fields) { - @Override - public List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... 
 fields) {
+		if (condition == FieldExpirationOptions.Condition.ALWAYS) {
+			return connection.invoke().just(Jedis::hpexpireAt, PipelineBinaryCommands::hpexpireAt, key, unixTimeInMillis,
+					fields);
+		}
+
+		ExpiryOption option = ExpiryOption.valueOf(condition.name());
-		return connection.invoke().just(Jedis::hpexpireAt, PipelineBinaryCommands::hpexpireAt, key, unixTimeInMillis,
-				fields);
+		return connection.invoke().just(Jedis::hpexpireAt, PipelineBinaryCommands::hpexpireAt, key, unixTimeInMillis,
+				option, fields);
 	}
 
diff --git a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java
index 16564fd1eb..032d6230d6 100644
--- a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java
+++ b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java
@@ -215,63 +215,28 @@ public Cursor> hScan(byte[] key, ScanOptions options) {
 	}
 
 	@Override
-	public @Nullable List expireHashField(byte[] key, org.springframework.data.redis.core.types.Expiration expiration,
-			FieldExpirationOptions options, byte[]...
fields) { - - if (expiration.isPersistent()) { - return hPersist(key, fields); - } - - ExpireArgs option = new ExpireArgs() { - @Override - public void build(CommandArgs args) { - - if(ObjectUtils.nullSafeEquals(options, FieldExpirationOptions.none())) { - return; - } - - args.add(options.getCondition().name()); - } - }; - - if (ObjectUtils.nullSafeEquals(TimeUnit.MILLISECONDS, expiration.getTimeUnit())) { - if (expiration.isUnixTimestamp()) { - return connection.invoke().fromMany(RedisHashAsyncCommands::hpexpireat, key, - expiration.getExpirationTimeInMilliseconds(), option, fields).toList(); - } - return connection.invoke() - .fromMany(RedisHashAsyncCommands::hpexpire, key, expiration.getExpirationTimeInMilliseconds(), option, fields) - .toList(); - } - - if (expiration.isUnixTimestamp()) { - return connection.invoke() - .fromMany(RedisHashAsyncCommands::hexpireat, key, expiration.getExpirationTimeInSeconds(), option, fields) - .toList(); - } - return connection.invoke() - .fromMany(RedisHashAsyncCommands::hexpire, key, expiration.getExpirationTimeInSeconds(), option, fields) + public List hExpire(byte[] key, long seconds, FieldExpirationOptions.Condition condition, byte[]... fields) { + return connection.invoke().fromMany(RedisHashAsyncCommands::hexpire, key, seconds, getExpireArgs(condition), fields) .toList(); } @Override - public List hExpire(byte[] key, long seconds, byte[]... fields) { - return connection.invoke().fromMany(RedisHashAsyncCommands::hexpire, key, seconds, fields).toList(); - } - - @Override - public List hpExpire(byte[] key, long millis, byte[]... fields) { - return connection.invoke().fromMany(RedisHashAsyncCommands::hpexpire, key, millis, fields).toList(); + public List hpExpire(byte[] key, long millis, FieldExpirationOptions.Condition condition, byte[]... 
fields) { + return connection.invoke().fromMany(RedisHashAsyncCommands::hpexpire, key, millis, getExpireArgs(condition), fields) + .toList(); } @Override - public List hExpireAt(byte[] key, long unixTime, byte[]... fields) { - return connection.invoke().fromMany(RedisHashAsyncCommands::hexpireat, key, unixTime, fields).toList(); + public List hExpireAt(byte[] key, long unixTime, FieldExpirationOptions.Condition condition, byte[]... fields) { + return connection.invoke() + .fromMany(RedisHashAsyncCommands::hexpireat, key, unixTime, getExpireArgs(condition), fields).toList(); } @Override - public List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... fields) { - return connection.invoke().fromMany(RedisHashAsyncCommands::hpexpireat, key, unixTimeInMillis, fields).toList(); + public List hpExpireAt(byte[] key, long unixTimeInMillis, FieldExpirationOptions.Condition condition, + byte[]... fields) { + return connection.invoke() + .fromMany(RedisHashAsyncCommands::hpexpireat, key, unixTimeInMillis, getExpireArgs(condition), fields).toList(); } @Override @@ -349,4 +314,19 @@ private static Entry toEntry(KeyValue value) { return value.hasValue() ? 
Converters.entryOf(value.getKey(), value.getValue()) : null; } + private ExpireArgs getExpireArgs(FieldExpirationOptions.Condition condition) { + + return new ExpireArgs() { + @Override + public void build(CommandArgs args) { + + if (ObjectUtils.nullSafeEquals(condition, FieldExpirationOptions.Condition.ALWAYS)) { + return; + } + + args.add(condition.name()); + } + }; + } + } diff --git a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java index 3cc7bfd9c3..84dd2ca906 100644 --- a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java @@ -269,7 +269,8 @@ public Flux> hStrLen(Publisher> expireHashField(Publisher commands) { + public Flux> applyExpiration(Publisher commands) { + return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { Assert.notNull(command.getKey(), "Key must not be null"); @@ -317,6 +318,7 @@ public void build(CommandArgs args) { @Override public Flux> hPersist(Publisher commands) { + return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { Assert.notNull(command.getKey(), "Key must not be null"); @@ -329,6 +331,7 @@ public Flux> hPersist(Publisher> hTtl(Publisher commands) { + return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { Assert.notNull(command.getKey(), "Key must not be null"); @@ -341,6 +344,7 @@ public Flux> hTtl(Publisher> hpTtl(Publisher commands) { + return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { Assert.notNull(command.getKey(), "Key must not be null"); diff --git a/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java b/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java index 0503c33094..87e5f9735b 100644 --- 
a/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java @@ -96,7 +96,7 @@ public interface BoundHashOperations extends BoundKeyOperations { Double increment(HK key, double delta); /** - * Return a random key (aka field) from the hash stored at the bound key. + * Return a random key from the hash stored at the bound key. * * @return {@literal null} if the hash does not exist or when used in pipeline / transaction. * @since 2.6 @@ -116,10 +116,10 @@ public interface BoundHashOperations extends BoundKeyOperations { Map.Entry randomEntry(); /** - * Return a random keys (aka fields) from the hash stored at the bound key. If the provided {@code count} argument is - * positive, return a list of distinct keys, capped either at {@code count} or the hash size. If {@code count} is - * negative, the behavior changes and the command is allowed to return the same key multiple times. In this case, the - * number of returned keys is the absolute value of the specified count. + * Return a random keys from the hash stored at the bound key. If the provided {@code count} argument is positive, + * return a list of distinct keys, capped either at {@code count} or the hash size. If {@code count} is negative, the + * behavior changes and the command is allowed to return the same key multiple times. In this case, the number of + * returned keys is the absolute value of the specified count. * * @param count number of keys to return. * @return {@literal null} if key does not exist or when used in pipeline / transaction. @@ -166,14 +166,15 @@ default ExpireChanges expire(Expiration expiration, Collection hashKeys) ExpireChanges expire(Expiration expiration, FieldExpirationOptions options, Collection hashKeys); /** - * Set time to live for given {@code hashKey} (aka field). + * Set time to live for given {@code hashKey} . 
* * @param timeout the amount of time after which the key will be expired, must not be {@literal null}. * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; - * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); {@code -2} - * indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / + * transaction. * @throws IllegalArgumentException if the timeout is {@literal null}. * @see Redis Documentation: HEXPIRE * @since 3.5 @@ -182,14 +183,15 @@ default ExpireChanges expire(Expiration expiration, Collection hashKeys) ExpireChanges expire(Duration timeout, Collection hashKeys); /** - * Set the expiration for given {@code hashKey} (aka field) as a {@literal date} timestamp. + * Set the expiration for given {@code hashKey} as a {@literal date} timestamp. * * @param expireAt must not be {@literal null}. * @param hashKeys must not be {@literal null}. 
- * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is - * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); - * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | + * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in + * pipeline / transaction. * @throws IllegalArgumentException if the instant is {@literal null} or too large to represent as a {@code Date}. * @see Redis Documentation: HEXPIRE * @since 3.5 @@ -198,12 +200,12 @@ default ExpireChanges expire(Expiration expiration, Collection hashKeys) ExpireChanges expireAt(Instant expireAt, Collection hashKeys); /** - * Remove the expiration from given {@code hashKey} (aka field). + * Remove the expiration from given {@code hashKey} . * * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is removed; - * {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field; {@literal null} when - * used in pipeline / transaction. 
+ * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is + * removed; {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such + * field; {@literal null} when used in pipeline / transaction. * @see Redis Documentation: HPERSIST * @since 3.5 */ @@ -211,31 +213,33 @@ default ExpireChanges expire(Expiration expiration, Collection hashKeys) ExpireChanges persist(Collection hashKeys); /** - * Get the time to live for {@code hashKey} (aka field) in seconds. + * Get the time to live for {@code hashKey} in seconds. * * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative value - * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. The command - * returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative + * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration + * time. The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / + * transaction. * @see Redis Documentation: HTTL * @since 3.5 */ @Nullable - Expirations getExpire(Collection hashKeys); + Expirations getTimeToLive(Collection hashKeys); /** - * Get the time to live for {@code hashKey} (aka field) and convert it to the given {@link TimeUnit}. + * Get the time to live for {@code hashKey} and convert it to the given {@link TimeUnit}. * * @param timeUnit must not be {@literal null}. * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative value - * to signal an error. 
The command returns {@code -1} if the key exists but has no associated expiration time. The command - * returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative + * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration + * time. The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / + * transaction. * @see Redis Documentation: HTTL * @since 3.5 */ @Nullable - Expirations getExpire(TimeUnit timeUnit, Collection hashKeys); + Expirations getTimeToLive(TimeUnit timeUnit, Collection hashKeys); /** * Get size of hash at the bound key. diff --git a/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java b/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java index 2be7e0bd3f..2e9e5a2778 100644 --- a/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java @@ -224,14 +224,11 @@ public ExpireChanges expire(K key, Duration duration, Collection hashKey byte[] rawKey = rawKey(key); byte[][] rawHashKeys = rawHashKeys(orderedKeys.toArray()); - boolean splitSecond = TimeoutUtils.hasMillis(duration); + boolean hasMillis = TimeoutUtils.hasMillis(duration); - List raw = execute(connection -> { - if (splitSecond) { - return connection.hashCommands().hpExpire(rawKey, duration.toMillis(), rawHashKeys); - } - return connection.hashCommands().hExpire(rawKey, TimeoutUtils.toSeconds(duration), rawHashKeys); - }); + List raw = execute(connection -> TimeoutUtils.hasMillis(duration) + ? connection.hashCommands().hpExpire(rawKey, duration.toMillis(), rawHashKeys) + : connection.hashCommands().hExpire(rawKey, TimeoutUtils.toSeconds(duration), rawHashKeys)); return raw != null ? 
ExpireChanges.of(orderedKeys, raw) : null; } @@ -243,8 +240,7 @@ public ExpireChanges expireAt(K key, Instant instant, Collection hashKey byte[] rawKey = rawKey(key); byte[][] rawHashKeys = rawHashKeys(orderedKeys.toArray()); - - Long millis = instant.toEpochMilli(); + long millis = instant.toEpochMilli(); List raw = execute(connection -> TimeoutUtils.containsSplitSecond(millis) ? connection.hashCommands().hpExpireAt(rawKey, millis, rawHashKeys) @@ -257,10 +253,10 @@ public ExpireChanges expireAt(K key, Instant instant, Collection hashKey public ExpireChanges expire(K key, Expiration expiration, FieldExpirationOptions options, Collection hashKeys) { List orderedKeys = List.copyOf(hashKeys); - byte[] rawKey = rawKey(key); byte[][] rawHashKeys = rawHashKeys(orderedKeys.toArray()); - List raw = execute(connection -> connection.hashCommands().expireHashField(rawKey, expiration, options, rawHashKeys)); + List raw = execute( + connection -> connection.hashCommands().applyExpiration(rawKey, expiration, options, rawHashKeys)); return raw != null ? 
ExpireChanges.of(orderedKeys, raw) : null; } @@ -269,7 +265,6 @@ public ExpireChanges expire(K key, Expiration expiration, FieldExpirationOpt public ExpireChanges persist(K key, Collection hashKeys) { List orderedKeys = List.copyOf(hashKeys); - byte[] rawKey = rawKey(key); byte[][] rawHashKeys = rawHashKeys(orderedKeys.toArray()); @@ -279,7 +274,7 @@ public ExpireChanges persist(K key, Collection hashKeys) { } @Override - public Expirations getExpire(K key, TimeUnit timeUnit, Collection hashKeys) { + public Expirations getTimeToLive(K key, TimeUnit timeUnit, Collection hashKeys) { if(timeUnit.compareTo(TimeUnit.MILLISECONDS) < 0) { throw new IllegalArgumentException("%s precision is not supported must be >= MILLISECONDS".formatted(timeUnit)); diff --git a/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java b/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java index d373a7f063..3a9f8aae1c 100644 --- a/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java @@ -15,10 +15,6 @@ */ package org.springframework.data.redis.core; -import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; -import org.springframework.data.redis.connection.ReactiveHashCommands.ExpireCommand; -import org.springframework.data.redis.connection.ReactiveRedisConnection.NumericResponse; -import org.springframework.data.redis.core.types.Expiration; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; @@ -33,10 +29,15 @@ import java.util.function.Function; import org.reactivestreams.Publisher; + import org.springframework.dao.InvalidDataAccessApiUsageException; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.ReactiveHashCommands; +import 
org.springframework.data.redis.connection.ReactiveHashCommands.ExpireCommand; +import org.springframework.data.redis.connection.ReactiveRedisConnection.NumericResponse; import org.springframework.data.redis.connection.convert.Converters; import org.springframework.data.redis.core.Expirations.Timeouts; +import org.springframework.data.redis.core.types.Expiration; import org.springframework.data.redis.serializer.RedisSerializationContext; import org.springframework.lang.Nullable; import org.springframework.util.Assert; @@ -254,7 +255,9 @@ public Mono> expire(H key, Expiration expiration, FieldExpirat List rawHashKeys = orderedKeys.stream().map(this::rawHashKey).toList(); Mono> raw =createFlux(connection -> { - return connection.expireHashField(Mono.just(ExpireCommand.expire(rawHashKeys, expiration).from(rawKey).withOptions(options))).map(NumericResponse::getOutput); + return connection + .applyExpiration(Mono.just(ExpireCommand.expire(rawHashKeys, expiration).from(rawKey).withOptions(options))) + .map(NumericResponse::getOutput); }).collectList(); return raw.map(values -> ExpireChanges.of(orderedKeys, values)); @@ -288,7 +291,7 @@ public Mono> persist(H key, Collection hashKeys) { @Nullable @Override - public Mono> getExpire(H key, TimeUnit timeUnit, Collection hashKeys) { + public Mono> getTimeToLive(H key, TimeUnit timeUnit, Collection hashKeys) { if (timeUnit.compareTo(TimeUnit.MILLISECONDS) < 0) { throw new IllegalArgumentException("%s precision is not supported must be >= MILLISECONDS".formatted(timeUnit)); diff --git a/src/main/java/org/springframework/data/redis/core/Expirations.java b/src/main/java/org/springframework/data/redis/core/Expirations.java index 958f90e3a8..11e66fc86f 100644 --- a/src/main/java/org/springframework/data/redis/core/Expirations.java +++ b/src/main/java/org/springframework/data/redis/core/Expirations.java @@ -29,31 +29,31 @@ import org.springframework.util.ObjectUtils; /** - * Value Object linking a number of keys to their {@link 
Expiration} retaining the order of the original source. + * Value Object linking a number of keys to their {@link TimeToLive} retaining the order of the original source. * Dedicated higher level methods interpret raw expiration values retrieved from a Redis Client. *
 * <ul>
 * <li>{@link #persistent()} returns keys that do not have an associated time to live</li>
 * <li>{@link #missing()} returns keys that do not exist and therefore have no associated time to live</li>
- * <li>{@link #expirations()} returns the ordered list of {@link Expiration expirations} based on the raw values</li>
+ * <li>{@link #ttl()} returns the ordered list of {@link TimeToLive expirations} based on the raw values</li>
 * <li>{@link #expiring()} returns the expiring keys along with their {@link Duration time to live}</li>
 * </ul>
- * + * * @author Christoph Strobl * @since 3.5 */ public class Expirations { // TODO: should we move this to let's say Hash.class or another place private final TimeUnit unit; - private final Map expirations; + private final Map expirations; - Expirations(TimeUnit unit, Map expirations) { + Expirations(TimeUnit unit, Map expirations) { this.unit = unit; this.expirations = expirations; } /** * Factory Method to create {@link Expirations} from raw sources provided in a given {@link TimeUnit}. - * + * * @param targetUnit the actual time unit of the raw timeToLive values. * @param keys the keys to associated with the raw values in timeToLive. Defines the actual order of entries within * {@link Expirations}. @@ -69,12 +69,12 @@ public static Expirations of(TimeUnit targetUnit, List keys, Timeouts } if (keys.size() == 1) { return new Expirations<>(targetUnit, - Map.of(keys.iterator().next(), Expiration.of(timeouts.raw().iterator().next(), timeouts.timeUnit()))); + Map.of(keys.iterator().next(), TimeToLive.of(timeouts.raw().iterator().next(), timeouts.timeUnit()))); } - Map target = CollectionUtils.newLinkedHashMap(keys.size()); + Map target = CollectionUtils.newLinkedHashMap(keys.size()); for (int i = 0; i < keys.size(); i++) { - target.put(keys.get(i), Expiration.of(timeouts.get(i), timeouts.timeUnit())); + target.put(keys.get(i), TimeToLive.of(timeouts.get(i), timeouts.timeUnit())); } return new Expirations<>(targetUnit, target); } @@ -83,26 +83,26 @@ public static Expirations of(TimeUnit targetUnit, List keys, Timeouts * @return an ordered set of keys that do not have a time to live. */ public Set persistent() { - return filterByState(Expiration.PERSISTENT); + return filterByState(TimeToLive.PERSISTENT); } /** * @return an ordered set of keys that do not exists and therefore do not have a time to live. 
*/ public Set missing() { - return filterByState(Expiration.MISSING); + return filterByState(TimeToLive.MISSING); } /** - * @return an ordered set of all {@link Expirations expirations} where the {@link Expiration#value()} is using the + * @return an ordered set of all {@link Expirations expirations} where the {@link TimeToLive#value()} is using the * {@link TimeUnit} defined in {@link #precision()}. */ - public List expirations() { + public List ttl() { return expirations.values().stream().map(it -> it.convert(this.unit)).toList(); } /** - * @return the {@link TimeUnit} for {@link Expiration expirations} held by this instance. + * @return the {@link TimeUnit} for {@link TimeToLive expirations} held by this instance. */ public TimeUnit precision() { return unit; @@ -110,7 +110,7 @@ public TimeUnit precision() { /** * @return an ordered {@link List} of {@link java.util.Map.Entry entries} combining keys with their actual time to - * live. {@link Expiration#isMissing() Missing} and {@link Expiration#isPersistent() persistent} entries are + * live. {@link TimeToLive#isMissing() Missing} and {@link TimeToLive#isPersistent() persistent} entries are * skipped. */ public List> expiring() { @@ -120,48 +120,48 @@ public List> expiring() { /** * @param key - * @return the {@link Expirations expirations} where the {@link Expiration#value()} is using the {@link TimeUnit} + * @return the {@link Expirations expirations} where the {@link TimeToLive#value()} is using the {@link TimeUnit} * defined in {@link #precision()} or {@literal null} if no entry could be found. 
*/ @Nullable - public Expiration expirationOf(K key) { + public TimeToLive expirationOf(K key) { - Expiration expiration = expirations.get(key); - if (expiration == null) { + TimeToLive timeToLive = expirations.get(key); + if (timeToLive == null) { return null; } - return expiration.convert(this.unit); + return timeToLive.convert(this.unit); } /** * @param key * @return the time to live value of the requested key if it exists and the expiration is neither - * {@link Expiration#isMissing() missing} nor {@link Expiration#isPersistent() persistent}, {@literal null} + * {@link TimeToLive#isMissing() missing} nor {@link TimeToLive#isPersistent() persistent}, {@literal null} * otherwise. */ @Nullable public Duration ttlOf(K key) { - Expiration expiration = expirationOf(key); - if (expiration == null) { + TimeToLive timeToLive = expirationOf(key); + if (timeToLive == null) { return null; } - return toDuration(expiration); + return toDuration(timeToLive); } - private Set filterByState(Expiration filter) { + private Set filterByState(TimeToLive filter) { return expirations.entrySet().stream().filter(entry -> entry.getValue().equals(filter)).map(Map.Entry::getKey) .collect(Collectors.toCollection(LinkedHashSet::new)); } @Nullable - static Duration toDuration(Expiration expiration) { + static Duration toDuration(TimeToLive timeToLive) { - if (expiration.sourceUnit == null) { + if (timeToLive.sourceUnit == null) { return null; } - return Duration.of(expiration.raw(), expiration.sourceUnit.toChronoUnit()); + return Duration.of(timeToLive.raw(), timeToLive.sourceUnit.toChronoUnit()); } public record Timeouts(TimeUnit timeUnit, List raw) { @@ -182,30 +182,55 @@ public int size() { * {@link #PERSISTENT} mark predefined states returned by Redis indicating a time to live value could not be retrieved * due to various reasons. */ - public static class Expiration { // TODO: is Expiry a better name for this type? 
+ public static class TimeToLive { // TODO: is Expiry a better name for this type? + + /** + * Predefined {@link TimeToLive} for a key that does not exists and therefore does not have a time to live. + */ + public static TimeToLive MISSING = new TimeToLive(-2L); + + /** + * Predefined {@link TimeToLive} for a key that exists but does not expire. + */ + public static TimeToLive PERSISTENT = new TimeToLive(-1L); private final long raw; @Nullable TimeUnit sourceUnit; @Nullable TimeUnit targetUnit; - public Expiration(long value) { + public TimeToLive(long value) { this(value, null); } - public Expiration(long value, @Nullable TimeUnit sourceUnit) { + public TimeToLive(long value, @Nullable TimeUnit sourceUnit) { this(value, sourceUnit, null); } - public Expiration(long value, @Nullable TimeUnit sourceUnit, @Nullable TimeUnit targetUnit) { + public TimeToLive(long value, @Nullable TimeUnit sourceUnit, @Nullable TimeUnit targetUnit) { this.raw = value; this.sourceUnit = sourceUnit; this.targetUnit = targetUnit; } + /** + * Factory method for creating {@link TimeToLive} instances, returning predefined ones if the value matches a known + * reserved state. + * + * @return the {@link TimeToLive} for the given raw value. + */ + static TimeToLive of(Number value, TimeUnit timeUnit) { + + return switch (value.intValue()) { + case -2 -> MISSING; + case -1 -> PERSISTENT; + default -> new TimeToLive(value.longValue(), timeUnit); + }; + } + /** * The raw source value as returned by the Redis Client. * - * @return the raw data + * @return the raw data. */ public long raw() { return raw; @@ -219,31 +244,23 @@ public long value() { if (sourceUnit == null || targetUnit == null) { return raw; } + return targetUnit.convert(raw, sourceUnit); } /** * @param timeUnit must not be {@literal null}. 
- * @return the {@link Expiration} instance with new target {@link TimeUnit} set for obtaining the {@link #value() + * @return the {@link TimeToLive} instance with new target {@link TimeUnit} set for obtaining the {@link #value() * value}, or the same instance raw value cannot or must not be converted. */ - public Expiration convert(TimeUnit timeUnit) { + public TimeToLive convert(TimeUnit timeUnit) { if (sourceUnit == null || ObjectUtils.nullSafeEquals(sourceUnit, timeUnit)) { return this; } - return new Expiration(raw, sourceUnit, timeUnit); - } - /** - * Predefined {@link Expiration} for a key that does not exists and therefore does not have a time to live. - */ - public static Expiration MISSING = new Expiration(-2L); - - /** - * Predefined {@link Expiration} for a key that exists but does not expire. - */ - public static Expiration PERSISTENT = new Expiration(-1L); + return new TimeToLive(raw, sourceUnit, timeUnit); + } /** * @return {@literal true} if key exists but does not expire. @@ -253,34 +270,19 @@ public boolean isPersistent() { } /** - * @return {@literal true} if key does not exists and therefore does not have a time to live. + * @return {@literal true} if key does not exist and therefore does not have a time to live. */ public boolean isMissing() { return MISSING.equals(this); } - /** - * Factory method for creating {@link Expiration} instances, returning predefined ones if the value matches a known - * reserved state. - * - * @return the {@link Expiration} for the given raw value. 
- */ - static Expiration of(Number value, TimeUnit timeUnit) { - return switch (value.intValue()) { - case -2 -> MISSING; - case -1 -> PERSISTENT; - default -> new Expiration(value.longValue(), timeUnit); - }; - } - @Override public boolean equals(Object o) { - if (o == this) { return true; } - if (!(o instanceof Expiration that)) { + if (!(o instanceof Expirations.TimeToLive that)) { return false; } diff --git a/src/main/java/org/springframework/data/redis/core/ExpireChanges.java b/src/main/java/org/springframework/data/redis/core/ExpireChanges.java index b9486f639d..029922d96e 100644 --- a/src/main/java/org/springframework/data/redis/core/ExpireChanges.java +++ b/src/main/java/org/springframework/data/redis/core/ExpireChanges.java @@ -15,6 +15,7 @@ */ package org.springframework.data.redis.core; +import java.util.Collections; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; @@ -23,6 +24,7 @@ import java.util.function.Predicate; import java.util.stream.Collectors; +import org.springframework.util.Assert; import org.springframework.util.CollectionUtils; /** @@ -35,7 +37,7 @@ *
  • {@link #skipped()} returns keys for which the time to live has not been set because a precondition was not * met
  • * - * + * * @author Christoph Strobl * @since 3.5 */ @@ -50,23 +52,25 @@ public class ExpireChanges { /** * Factory Method to create {@link ExpireChanges} from raw sources. * - * @param keys the keys to associated with the raw values in states. Defines the actual order of entries within + * @param fields the fields to associated with the raw values in states. Defines the actual order of entries within * {@link ExpireChanges}. * @param states the raw Redis state change values. * @return new instance of {@link ExpireChanges}. * @param the key type used */ - public static ExpireChanges of(List keys, List states) { + public static ExpireChanges of(List fields, List states) { + + Assert.isTrue(fields.size() == states.size(), "Keys and States must have the same number of elements"); - if (keys.size() == 1) { - return new ExpireChanges<>(Map.of(keys.iterator().next(), stateFromValue(states.iterator().next()))); + if (fields.size() == 1) { + return new ExpireChanges<>(Map.of(fields.iterator().next(), stateFromValue(states.iterator().next()))); } - Map target = CollectionUtils.newLinkedHashMap(keys.size()); - for (int i = 0; i < keys.size(); i++) { - target.put(keys.get(i), stateFromValue(states.get(i))); + Map target = CollectionUtils.newLinkedHashMap(fields.size()); + for (int i = 0; i < fields.size(); i++) { + target.put(fields.get(i), stateFromValue(states.get(i))); } - return new ExpireChanges<>(target); + return new ExpireChanges<>(Collections.unmodifiableMap(target)); } /** diff --git a/src/main/java/org/springframework/data/redis/core/HashOperations.java b/src/main/java/org/springframework/data/redis/core/HashOperations.java index c32c33983c..89bf1ac599 100644 --- a/src/main/java/org/springframework/data/redis/core/HashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/HashOperations.java @@ -96,7 +96,7 @@ public interface HashOperations { Double increment(H key, HK hashKey, double delta); /** - * Return a random hash key (aka field) from 
the hash stored at {@code key}. + * Return a random hash key from the hash stored at {@code key}. * * @param key must not be {@literal null}. * @return {@literal null} if key does not exist or when used in pipeline / transaction. @@ -118,10 +118,10 @@ public interface HashOperations { Map.Entry randomEntry(H key); /** - * Return random hash keys (aka fields) from the hash stored at {@code key}. If the provided {@code count} argument is - * positive, return a list of distinct hash keys, capped either at {@code count} or the hash size. If {@code count} is - * negative, the behavior changes and the command is allowed to return the same hash key multiple times. In this case, - * the number of returned fields is the absolute value of the specified count. + * Return random hash keys from the hash stored at {@code key}. If the provided {@code count} argument is positive, + * return a list of distinct hash keys, capped either at {@code count} or the hash size. If {@code count} is negative, + * the behavior changes and the command is allowed to return the same hash key multiple times. In this case, the + * number of returned fields is the absolute value of the specified count. * * @param key must not be {@literal null}. * @param count number of fields to return. @@ -228,15 +228,16 @@ public interface HashOperations { Cursor> scan(H key, ScanOptions options); /** - * Set time to live for given {@code hashKey} (aka field). + * Set time to live for given {@code hashKey} . * * @param key must not be {@literal null}. * @param timeout the amount of time after which the key will be expired, must not be {@literal null}. * @param hashKeys must not be {@literal null}. 
- * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; - * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); {@code -2} - * indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / + * transaction. * @throws IllegalArgumentException if the timeout is {@literal null}. * @see Redis Documentation: HEXPIRE * @since 3.5 @@ -245,15 +246,16 @@ public interface HashOperations { ExpireChanges expire(H key, Duration timeout, Collection hashKeys); /** - * Set the expiration for given {@code hashKey} (aka field) as a {@literal date} timestamp. + * Set the expiration for given {@code hashKeys} as a {@literal date} timestamp. * * @param key must not be {@literal null}. * @param expireAt must not be {@literal null}. * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is - * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); - * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. 
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | + * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in + * pipeline / transaction. * @throws IllegalArgumentException if the instant is {@literal null} or too large to represent as a {@code Date}. * @see Redis Documentation: HEXPIRE * @since 3.5 @@ -261,16 +263,33 @@ public interface HashOperations { @Nullable ExpireChanges expireAt(H key, Instant expireAt, Collection hashKeys); + /** + * Apply the expiration for given {@code hashKeys} as a {@literal date} timestamp. + * + * @param key must not be {@literal null}. + * @param expiration must not be {@literal null}. + * @param options must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | + * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in + * pipeline / transaction. + * @throws IllegalArgumentException if the instant is {@literal null} or too large to represent as a {@code Date}. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable ExpireChanges expire(H key, Expiration expiration, FieldExpirationOptions options, Collection hashKeys); /** - * Remove the expiration from given {@code hashKey} (aka field). + * Remove the expiration from given {@code hashKeys} . * * @param key must not be {@literal null}. 
* @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is removed; - * {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field; {@literal null} when - * used in pipeline / transaction. + * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is + * removed; {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such + * field; {@literal null} when used in pipeline / transaction. * @see Redis Documentation: HPERSIST * @since 3.5 */ @@ -278,38 +297,41 @@ public interface HashOperations { ExpireChanges persist(H key, Collection hashKeys); /** - * Get the time to live for {@code hashKey} (aka field) in seconds. + * Get the time to live for {@code hashKeys} in seconds. * * @param key must not be {@literal null}. * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative value - * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. The command - * returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative + * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration + * time. The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / + * transaction. 
* @see Redis Documentation: HTTL * @since 3.5 */ @Nullable - default Expirations getExpire(H key, Collection hashKeys) { - return getExpire(key, TimeUnit.SECONDS, hashKeys); + default Expirations getTimeToLive(H key, Collection hashKeys) { + return getTimeToLive(key, TimeUnit.SECONDS, hashKeys); } /** - * Get the time to live for {@code hashKey} (aka field) and convert it to the given {@link TimeUnit}. + * Get the time to live for {@code hashKeys} and convert it to the given {@link TimeUnit}. * * @param key must not be {@literal null}. * @param timeUnit must not be {@literal null}. * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative value - * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. The command - * returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative + * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration + * time. The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / + * transaction. * @see Redis Documentation: HTTL * @since 3.5 */ @Nullable - Expirations getExpire(H key, TimeUnit timeUnit, Collection hashKeys); + Expirations getTimeToLive(H key, TimeUnit timeUnit, Collection hashKeys); /** * @return never {@literal null}. 
*/ RedisOperations getOperations(); + } diff --git a/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java b/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java index 2d0cbcaf10..aac3c6a642 100644 --- a/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java @@ -15,8 +15,6 @@ */ package org.springframework.data.redis.core; -import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; -import org.springframework.data.redis.core.types.Expiration; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; @@ -28,6 +26,8 @@ import java.util.Map; import java.util.concurrent.TimeUnit; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; +import org.springframework.data.redis.core.types.Expiration; import org.springframework.lang.Nullable; /** @@ -102,7 +102,7 @@ public interface ReactiveHashOperations { Mono increment(H key, HK hashKey, double delta); /** - * Return a random hash key (aka field) from the hash stored at {@code key}. + * Return a random hash key from the hash stored at {@code key}. * * @param key must not be {@literal null}. * @return @@ -122,10 +122,10 @@ public interface ReactiveHashOperations { Mono> randomEntry(H key); /** - * Return random hash keys (aka fields) from the hash stored at {@code key}. If the provided {@code count} argument is - * positive, return a list of distinct hash keys, capped either at {@code count} or the hash size. If {@code count} is - * negative, the behavior changes and the command is allowed to return the same hash key multiple times. In this case, - * the number of returned fields is the absolute value of the specified count. + * Return random hash keys from the hash stored at {@code key}. 
If the provided {@code count} argument is positive, + * return a list of distinct hash keys, capped either at {@code count} or the hash size. If {@code count} is negative, + * the behavior changes and the command is allowed to return the same hash key multiple times. In this case, the + * number of returned fields is the absolute value of the specified count. * * @param key must not be {@literal null}. * @param count number of fields to return. @@ -242,7 +242,7 @@ default Flux> scan(H key) { Mono> expire(H key, Expiration expiration, FieldExpirationOptions options, Collection hashKeys); /** - * Set the expiration for given {@code hashKey} (aka field) as a {@literal date} timestamp. + * Set the expiration for given {@code hashKey} as a {@literal date} timestamp. * * @param key must not be {@literal null}. * @param expireAt must not be {@literal null}. @@ -260,7 +260,7 @@ default Flux> scan(H key) { Mono> expireAt(H key, Instant expireAt, Collection hashKeys); /** - * Remove the expiration from given {@code hashKey} (aka field). + * Remove the expiration from given {@code hashKey} . * * @param key must not be {@literal null}. * @param hashKeys must not be {@literal null}. @@ -274,7 +274,7 @@ default Flux> scan(H key) { Mono> persist(H key, Collection hashKeys); /** - * Get the time to live for {@code hashKey} (aka field) in seconds. + * Get the time to live for {@code hashKey} in seconds. * * @param key must not be {@literal null}. * @param hashKeys must not be {@literal null}. @@ -286,12 +286,12 @@ default Flux> scan(H key) { * @since 3.5 */ @Nullable - default Mono> getExpire(H key, Collection hashKeys) { - return getExpire(key, TimeUnit.SECONDS, hashKeys); + default Mono> getTimeToLive(H key, Collection hashKeys) { + return getTimeToLive(key, TimeUnit.SECONDS, hashKeys); } /** - * Get the time to live for {@code hashKey} (aka field) and convert it to the given {@link TimeUnit}. 
+ * Get the time to live for {@code hashKey} and convert it to the given {@link TimeUnit}. * * @param key must not be {@literal null}. * @param timeUnit must not be {@literal null}. @@ -304,7 +304,7 @@ default Mono> getExpire(H key, Collection hashKeys) { * @since 3.5 */ @Nullable - Mono> getExpire(H key, TimeUnit timeUnit, Collection hashKeys); + Mono> getTimeToLive(H key, TimeUnit timeUnit, Collection hashKeys); /** * Removes the given {@literal key}. diff --git a/src/main/java/org/springframework/data/redis/core/RedisOperations.java b/src/main/java/org/springframework/data/redis/core/RedisOperations.java index 8c1ad67ad6..def0dca04b 100644 --- a/src/main/java/org/springframework/data/redis/core/RedisOperations.java +++ b/src/main/java/org/springframework/data/redis/core/RedisOperations.java @@ -376,6 +376,7 @@ default Boolean expireAt(K key, Instant expireAt) { @Nullable Boolean persist(K key); + // TODO: Add TimeToLive (getTimeToLive) /** * Get the time to live for {@code key} in seconds. 
* diff --git a/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java b/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java index ad22195ad0..8cc271d9f4 100644 --- a/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java +++ b/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java @@ -21,15 +21,14 @@ import java.util.Collections; import java.util.Date; import java.util.Map; -import java.util.Objects; import java.util.Set; import java.util.concurrent.TimeUnit; import org.springframework.data.redis.connection.DataType; -import org.springframework.data.redis.core.Expirations; -import org.springframework.data.redis.core.ExpireChanges; import org.springframework.data.redis.core.BoundHashOperations; import org.springframework.data.redis.core.Cursor; +import org.springframework.data.redis.core.Expirations; +import org.springframework.data.redis.core.ExpireChanges; import org.springframework.data.redis.core.RedisOperations; import org.springframework.data.redis.core.ScanOptions; import org.springframework.data.redis.core.SessionCallback; @@ -329,27 +328,27 @@ public Cursor> scan() { @Override public ExpireChanges expire(Duration timeout, Collection hashKeys) { - return Objects.requireNonNull(hashOps.expire(timeout, hashKeys)); + return hashOps.expire(timeout, hashKeys); } @Override public ExpireChanges expireAt(Instant expireAt, Collection hashKeys) { - return Objects.requireNonNull(hashOps.expireAt(expireAt, hashKeys)); + return hashOps.expireAt(expireAt, hashKeys); } @Override public ExpireChanges persist(Collection hashKeys) { - return Objects.requireNonNull(hashOps.persist(hashKeys)); + return hashOps.persist(hashKeys); } @Override - public Expirations getExpire(Collection hashKeys) { - return Objects.requireNonNull(hashOps.getExpire(hashKeys)); + public Expirations getTimeToLive(Collection hashKeys) { + return hashOps.getTimeToLive(hashKeys); 
} @Override - public Expirations getExpire(TimeUnit timeUnit, Collection hashKeys) { - return Objects.requireNonNull(hashOps.getExpire(timeUnit, hashKeys)); + public Expirations getTimeToLive(TimeUnit timeUnit, Collection hashKeys) { + return hashOps.getTimeToLive(timeUnit, hashKeys); } private void checkResult(@Nullable Object obj) { diff --git a/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java b/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java index 54d002d549..0ad2345454 100644 --- a/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java +++ b/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java @@ -71,7 +71,7 @@ public interface RedisMap extends RedisStore, ConcurrentMap { * @since 2.6 */ @Nullable - Map.Entry randomEntry(); + Map.Entry randomEntry(); /** * @since 1.4 @@ -80,69 +80,79 @@ public interface RedisMap extends RedisStore, ConcurrentMap { Iterator> scan(); /** - * Set time to live for given {hash {@code key}. + * Set time to live for given {@code hashKeys}. * * @param timeout the amount of time after which the key will be expired, must not be {@literal null}. * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; - * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); {@code -2} - * indicating there is no such field; {@literal null} when used in pipeline / transaction. 
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / + * transaction. * @throws IllegalArgumentException if the timeout is {@literal null}. * @see Redis Documentation: HEXPIRE * @since 3.5 */ + @Nullable ExpireChanges expire(Duration timeout, Collection hashKeys); /** - * Set the expiration for given hash {@code key} as a {@literal date} timestamp. + * Set the expiration for given {@code hashKeys} as a {@literal date} timestamp. * * @param expireAt must not be {@literal null}. * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted - * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is - * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); - * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | + * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in + * pipeline / transaction. * @throws IllegalArgumentException if the instant is {@literal null} or too large to represent as a {@code Date}. 
* @see Redis Documentation: HEXPIRE * @since 3.5 */ + @Nullable ExpireChanges expireAt(Instant expireAt, Collection hashKeys); /** - * Remove the expiration from given hash {@code key}. + * Remove the expiration from given {@code hashKeys}. * * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is removed; - * {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field; {@literal null} when - * used in pipeline / transaction. + * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is + * removed; {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such + * field; {@literal null} when used in pipeline / transaction. * @see Redis Documentation: HPERSIST * @since 3.5 */ + @Nullable ExpireChanges persist(Collection hashKeys); /** - * Get the time to live for hash {@code key} in seconds. + * Get the time to live for {@code hashKeys} in seconds. * * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative value - * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. The command - * returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative + * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration + * time. The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / + * transaction. 
* @see Redis Documentation: HTTL * @since 3.5 */ - Expirations getExpire(Collection hashKeys); + @Nullable + Expirations getTimeToLive(Collection hashKeys); /** - * Get the time to live for hash {@code key} and convert it to the given {@link TimeUnit}. + * Get the time to live for {@code hashKeys} and convert it to the given {@link TimeUnit}. * * @param timeUnit must not be {@literal null}. * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative value - * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. The command - * returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative + * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration + * time. The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / + * transaction. 
* @see Redis Documentation: HTTL * @since 3.5 */ - Expirations getExpire(TimeUnit timeUnit, Collection hashKeys); + @Nullable + Expirations getTimeToLive(TimeUnit timeUnit, Collection hashKeys); + } diff --git a/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java b/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java index 54d7f0c9d3..0819414821 100644 --- a/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java +++ b/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java @@ -19,14 +19,22 @@ import java.io.OutputStream; import java.time.Duration; import java.time.Instant; -import java.util.*; +import java.util.Collection; +import java.util.Collections; +import java.util.Date; +import java.util.Enumeration; +import java.util.Iterator; +import java.util.LinkedHashSet; +import java.util.Map; import java.util.Map.Entry; +import java.util.Properties; +import java.util.Set; import java.util.concurrent.TimeUnit; import org.springframework.data.redis.connection.DataType; +import org.springframework.data.redis.core.BoundHashOperations; import org.springframework.data.redis.core.Expirations; import org.springframework.data.redis.core.ExpireChanges; -import org.springframework.data.redis.core.BoundHashOperations; import org.springframework.data.redis.core.RedisOperations; import org.springframework.lang.Nullable; @@ -323,17 +331,17 @@ public ExpireChanges persist(Collection hashKeys) { } @Override - public Expirations getExpire(Collection hashKeys) { + public Expirations getTimeToLive(Collection hashKeys) { Collection keys = hashKeys.stream().map(key -> (String) key).toList(); - return (Expirations) hashOps.getExpire(keys); + return (Expirations) hashOps.getTimeToLive(keys); } @Override - public Expirations getExpire(TimeUnit timeUnit, Collection hashKeys) { + public Expirations getTimeToLive(TimeUnit timeUnit, Collection hashKeys) { Collection keys 
= hashKeys.stream().map(key -> (String) key).toList(); - return (Expirations) hashOps.getExpire(timeUnit, keys); + return (Expirations) hashOps.getTimeToLive(timeUnit, keys); } } diff --git a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java index 4abd23dacf..f34ee94e0b 100644 --- a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java @@ -15,9 +15,8 @@ */ package org.springframework.data.redis.core; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatExceptionOfType; -import static org.assertj.core.api.Assumptions.assumeThat; +import static org.assertj.core.api.Assertions.*; +import static org.assertj.core.api.Assumptions.*; import java.io.IOException; import java.time.Duration; @@ -30,14 +29,14 @@ import org.assertj.core.api.InstanceOfAssertFactories; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; + import org.springframework.data.redis.ObjectFactory; import org.springframework.data.redis.RawObjectFactory; import org.springframework.data.redis.StringObjectFactory; import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.jedis.JedisConnectionFactory; import org.springframework.data.redis.connection.jedis.extension.JedisConnectionFactoryExtension; -import org.springframework.data.redis.core.Expirations.Expiration; +import org.springframework.data.redis.core.Expirations.TimeToLive; import org.springframework.data.redis.core.ExpireChanges.ExpiryChangeState; import org.springframework.data.redis.test.condition.EnabledOnCommand; import org.springframework.data.redis.test.extension.RedisStanalone; @@ -228,11 +227,11 @@ void 
testExpireAndGetExpireMillis() { assertThat(redisTemplate.opsForHash().expire(key, Duration.ofMillis(500), List.of(key1))) .satisfies(ExpireChanges::allOk); - assertThat(redisTemplate.opsForHash().getExpire(key, List.of(key1))).satisfies(expirations -> { + assertThat(redisTemplate.opsForHash().getTimeToLive(key, List.of(key1))).satisfies(expirations -> { assertThat(expirations.missing()).isEmpty(); assertThat(expirations.precision()).isEqualTo(TimeUnit.SECONDS); - assertThat(expirations.expirationOf(key1)).extracting(Expiration::raw, InstanceOfAssertFactories.LONG) + assertThat(expirations.expirationOf(key1)).extracting(TimeToLive::raw, InstanceOfAssertFactories.LONG) .isBetween(0L, 1L); assertThat(expirations.ttlOf(key1)).isBetween(Duration.ZERO, Duration.ofSeconds(1)); }); @@ -259,11 +258,11 @@ void testExpireAndGetExpireSeconds() { assertThat(changes.stateChanges()).map(ExpiryChangeState::value).containsExactly(1L, 1L); }); - assertThat(redisTemplate.opsForHash().getExpire(key, TimeUnit.SECONDS, List.of(key1, key2))) + assertThat(redisTemplate.opsForHash().getTimeToLive(key, TimeUnit.SECONDS, List.of(key1, key2))) .satisfies(expirations -> { assertThat(expirations.missing()).isEmpty(); assertThat(expirations.precision()).isEqualTo(TimeUnit.SECONDS); - assertThat(expirations.expirationOf(key1)).extracting(Expiration::raw, InstanceOfAssertFactories.LONG) + assertThat(expirations.expirationOf(key1)).extracting(TimeToLive::raw, InstanceOfAssertFactories.LONG) .isBetween(0L, 5L); assertThat(expirations.ttlOf(key1)).isBetween(Duration.ofSeconds(1), Duration.ofSeconds(5)); }); @@ -284,11 +283,11 @@ void testExpireAtAndGetExpireMillis() { assertThat(redisTemplate.opsForHash().expireAt(key, Instant.now().plusMillis(500), List.of(key1, key2))) .satisfies(ExpireChanges::allOk); - assertThat(redisTemplate.opsForHash().getExpire(key, TimeUnit.MILLISECONDS, List.of(key1, key2))) + assertThat(redisTemplate.opsForHash().getTimeToLive(key, TimeUnit.MILLISECONDS, List.of(key1, 
key2))) .satisfies(expirations -> { assertThat(expirations.missing()).isEmpty(); assertThat(expirations.precision()).isEqualTo(TimeUnit.MILLISECONDS); - assertThat(expirations.expirationOf(key1)).extracting(Expiration::raw, InstanceOfAssertFactories.LONG) + assertThat(expirations.expirationOf(key1)).extracting(TimeToLive::raw, InstanceOfAssertFactories.LONG) .isBetween(0L, 500L); assertThat(expirations.ttlOf(key1)).isBetween(Duration.ZERO, Duration.ofMillis(500)); }); @@ -301,7 +300,7 @@ void expireThrowsErrorOfNanoPrecision() { HK key1 = hashKeyFactory.instance(); assertThatExceptionOfType(IllegalArgumentException.class) - .isThrownBy(() -> redisTemplate.opsForHash().getExpire(key, TimeUnit.NANOSECONDS, List.of(key1))); + .isThrownBy(() -> redisTemplate.opsForHash().getTimeToLive(key, TimeUnit.NANOSECONDS, List.of(key1))); } @ParameterizedRedisTest @@ -360,7 +359,7 @@ void testPersistAndGetExpireMillis() { assertThat(redisTemplate.opsForHash().persist(key, List.of(key2))).satisfies(ExpireChanges::allOk); - assertThat(redisTemplate.opsForHash().getExpire(key, List.of(key1, key2))).satisfies(expirations -> { + assertThat(redisTemplate.opsForHash().getTimeToLive(key, List.of(key1, key2))).satisfies(expirations -> { assertThat(expirations.expirationOf(key1).isPersistent()).isFalse(); assertThat(expirations.expirationOf(key2).isPersistent()).isTrue(); }); diff --git a/src/test/java/org/springframework/data/redis/core/DefaultReactiveHashOperationsIntegrationTests.java b/src/test/java/org/springframework/data/redis/core/DefaultReactiveHashOperationsIntegrationTests.java index a128a29293..2d574ee123 100644 --- a/src/test/java/org/springframework/data/redis/core/DefaultReactiveHashOperationsIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/core/DefaultReactiveHashOperationsIntegrationTests.java @@ -15,11 +15,10 @@ */ package org.springframework.data.redis.core; -import static org.assertj.core.api.Assertions.assertThat; -import static 
org.assertj.core.api.Assumptions.assumeThat; -import static org.junit.jupiter.api.condition.OS.MAC; +import static org.assertj.core.api.Assertions.*; +import static org.assertj.core.api.Assumptions.*; +import static org.junit.jupiter.api.condition.OS.*; -import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import reactor.test.StepVerifier; import java.time.Duration; @@ -34,10 +33,12 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.condition.DisabledOnOs; + import org.springframework.data.redis.ObjectFactory; import org.springframework.data.redis.RawObjectFactory; import org.springframework.data.redis.SettingsUtils; import org.springframework.data.redis.StringObjectFactory; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.RedisConnection; import org.springframework.data.redis.connection.RedisConnectionFactory; import org.springframework.data.redis.connection.convert.Converters; @@ -523,7 +524,7 @@ void testExpireAndGetExpireMillis() { assertThat(changes.allOk()).isTrue(); }).verifyComplete(); - hashOperations.getExpire(key, List.of(key1)) // + hashOperations.getTimeToLive(key, List.of(key1)) // .as(StepVerifier::create) // .assertNext(it -> { assertThat(it.expirationOf(key1).raw()).isBetween(0L, 2L); @@ -576,7 +577,7 @@ void testExpireAndGetExpireSeconds() { assertThat(changes.allOk()).isTrue(); }).verifyComplete(); - hashOperations.getExpire(key, TimeUnit.SECONDS, List.of(key1, key2)) // + hashOperations.getTimeToLive(key, TimeUnit.SECONDS, List.of(key1, key2)) // .as(StepVerifier::create) // .assertNext(it -> { assertThat(it.expirationOf(key1).raw()).isBetween(0L, 5L); @@ -603,7 +604,7 @@ void testExpireAtAndGetExpireMillis() { assertThat(changes.allOk()).isTrue(); }).verifyComplete(); - redisTemplate.opsForHash().getExpire(key, List.of(key1, key2)).as(StepVerifier::create)// + redisTemplate.opsForHash().getTimeToLive(key, List.of(key1, 
key2)).as(StepVerifier::create)// .assertNext(it -> { assertThat(it.expirationOf(key1).raw()).isBetween(0L, 2L); assertThat(it.expirationOf(key2).raw()).isBetween(0L, 2L); @@ -633,7 +634,7 @@ void testPersistAndGetExpireMillis() { assertThat(changes.allOk()).isTrue(); }).verifyComplete(); - redisTemplate.opsForHash().getExpire(key, List.of(key1, key2)).as(StepVerifier::create)// + redisTemplate.opsForHash().getTimeToLive(key, List.of(key1, key2)).as(StepVerifier::create)// .assertNext(expirations -> { assertThat(expirations.persistent()).contains(key1, key2); }).verifyComplete(); diff --git a/src/test/java/org/springframework/data/redis/core/ExpirationsUnitTest.java b/src/test/java/org/springframework/data/redis/core/ExpirationsUnitTest.java index 5fc1953d3d..33e16f6c6d 100644 --- a/src/test/java/org/springframework/data/redis/core/ExpirationsUnitTest.java +++ b/src/test/java/org/springframework/data/redis/core/ExpirationsUnitTest.java @@ -15,7 +15,7 @@ */ package org.springframework.data.redis.core; -import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.*; import java.time.Duration; import java.util.List; @@ -26,6 +26,7 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; + import org.springframework.data.redis.core.Expirations.Timeouts; /** @@ -44,7 +45,7 @@ void expirationMemorizesSourceUnit(TimeUnit targetUnit) { Expirations exp = Expirations.of(targetUnit, List.of(KEY_1), new Timeouts(TimeUnit.SECONDS, List.of(120L))); - assertThat(exp.expirations().get(0)).satisfies(expiration -> { + assertThat(exp.ttl().get(0)).satisfies(expiration -> { assertThat(expiration.raw()).isEqualTo(120L); assertThat(expiration.value()).isEqualTo(targetUnit.convert(120, TimeUnit.SECONDS)); }); diff --git a/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java 
b/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java index 5ce8e54418..728258ade8 100644 --- a/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java @@ -15,9 +15,8 @@ */ package org.springframework.data.redis.support.collections; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatExceptionOfType; -import static org.assertj.core.api.Assumptions.assumeThat; +import static org.assertj.core.api.Assertions.*; +import static org.assertj.core.api.Assumptions.*; import java.io.IOException; import java.text.DecimalFormat; @@ -36,14 +35,15 @@ import org.assertj.core.api.Assumptions; import org.junit.jupiter.api.BeforeEach; + import org.springframework.dao.InvalidDataAccessApiUsageException; import org.springframework.data.redis.DoubleAsStringObjectFactory; import org.springframework.data.redis.LongAsStringObjectFactory; import org.springframework.data.redis.ObjectFactory; import org.springframework.data.redis.RawObjectFactory; import org.springframework.data.redis.RedisSystemException; -import org.springframework.data.redis.core.ExpireChanges; import org.springframework.data.redis.core.Cursor; +import org.springframework.data.redis.core.ExpireChanges; import org.springframework.data.redis.core.RedisCallback; import org.springframework.data.redis.core.RedisOperations; import org.springframework.data.redis.core.RedisTemplate; @@ -206,10 +206,10 @@ void testExpire() { Collection keys = Collections.singletonList(k1); assertThat(map.expire(Duration.ofSeconds(5), keys)).satisfies(ExpireChanges::allOk); - assertThat(map.getExpire(keys)).satisfies(expiration -> { + assertThat(map.getTimeToLive(keys)).satisfies(expiration -> { assertThat(expiration.expirationOf(k1).raw()).isBetween(1L, 5L); }); - 
assertThat(map.getExpire(TimeUnit.MILLISECONDS, keys)).satisfies(expiration -> { + assertThat(map.getTimeToLive(TimeUnit.MILLISECONDS, keys)).satisfies(expiration -> { assertThat(expiration.expirationOf(k1).raw()).isBetween(1000L, 5000L); }); assertThat(map.persist(keys)).satisfies(ExpireChanges::allOk); @@ -225,10 +225,10 @@ void testExpireAt() { Collection keys = Collections.singletonList(k1); assertThat(map.expireAt(Instant.now().plusSeconds(5), keys)).satisfies(ExpireChanges::allOk); - assertThat(map.getExpire(keys)).satisfies(expiration -> { + assertThat(map.getTimeToLive(keys)).satisfies(expiration -> { assertThat(expiration.expirationOf(k1).raw()).isBetween(1L, 5L); }); - assertThat(map.getExpire(TimeUnit.MILLISECONDS, keys)).satisfies(expiration -> { + assertThat(map.getTimeToLive(TimeUnit.MILLISECONDS, keys)).satisfies(expiration -> { assertThat(expiration.expirationOf(k1).raw()).isBetween(1000L, 5000L); }); assertThat(map.persist(keys)).satisfies(ExpireChanges::allOk); From c3c1e46c7ba5d9e009f0759b803d9b04a2a45841 Mon Sep 17 00:00:00 2001 From: Mark Paluch Date: Tue, 18 Feb 2025 09:11:16 +0100 Subject: [PATCH 08/13] Polishing. 
--- .../connection/StringRedisConnection.java | 35 +++++++------- .../data/redis/core/BoundHashOperations.java | 1 + .../redis/core/DefaultHashOperations.java | 3 +- .../core/DefaultReactiveHashOperations.java | 3 +- .../data/redis/core/HashOperations.java | 1 + .../redis/core/ReactiveHashOperations.java | 1 + .../redis/core/{ => types}/Expirations.java | 48 ++++++++++++------- .../support/collections/DefaultRedisMap.java | 2 +- .../redis/support/collections/RedisMap.java | 2 +- .../support/collections/RedisProperties.java | 2 +- ...DefaultHashOperationsIntegrationTests.java | 8 ++-- .../core/{ => types}/ExpirationsUnitTest.java | 17 +++---- 12 files changed, 71 insertions(+), 52 deletions(-) rename src/main/java/org/springframework/data/redis/core/{ => types}/Expirations.java (89%) rename src/test/java/org/springframework/data/redis/core/{ => types}/ExpirationsUnitTest.java (91%) diff --git a/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java b/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java index 1a4ca013d7..5ff0e9946f 100644 --- a/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java +++ b/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java @@ -71,7 +71,6 @@ * @author Andrey Shlykov * @author ihaohong * @author Shyngys Sapraliyev - * * @see RedisCallback * @see RedisSerializer * @see StringRedisTemplate @@ -1661,7 +1660,6 @@ default Long lPos(String key, String element) { */ Long zRemRange(String key, long start, long end); - /** * Remove all elements between the lexicographical {@link Range}. 
* @@ -1941,7 +1939,8 @@ default Set zUnionWithScores(Aggregate aggregate, int[] weights, St * @return * @since 1.6 * @see Redis Documentation: ZRANGEBYLEX - * @see RedisZSetCommands#zRangeByLex(byte[], org.springframework.data.domain.Range, org.springframework.data.redis.connection.Limit) + * @see RedisZSetCommands#zRangeByLex(byte[], org.springframework.data.domain.Range, + * org.springframework.data.redis.connection.Limit) */ Set zRangeByLex(String key, org.springframework.data.domain.Range range, org.springframework.data.redis.connection.Limit limit); @@ -1983,7 +1982,8 @@ default Set zRevRangeByLex(String key, org.springframework.data.domain.R * @return * @since 2.4 * @see Redis Documentation: ZREVRANGEBYLEX - * @see RedisZSetCommands#zRevRangeByLex(byte[], org.springframework.data.domain.Range, org.springframework.data.redis.connection.Limit) + * @see RedisZSetCommands#zRevRangeByLex(byte[], org.springframework.data.domain.Range, + * org.springframework.data.redis.connection.Limit) */ Set zRevRangeByLex(String key, org.springframework.data.domain.Range range, org.springframework.data.redis.connection.Limit limit); @@ -2498,9 +2498,10 @@ List hpExpireAt(String key, long unixTimeInMillis, Hash.FieldExpirationOpt * * @param key must not be {@literal null}. * @param fields must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: the time to live in milliseconds; or a negative value - * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. - * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @return a list of {@link Long} values for each of the fields provided: the time to live in milliseconds; or a + * negative value to signal an error. The command returns {@code -1} if the key exists but has no associated + * expiration time. 
The command returns {@code -2} if the key does not exist; {@literal null} when used in + * pipeline / transaction. * @see Redis Documentation: HTTL * @since 3.5 */ @@ -2513,9 +2514,10 @@ List hpExpireAt(String key, long unixTimeInMillis, Hash.FieldExpirationOpt * @param key must not be {@literal null}. * @param timeUnit must not be {@literal null}. * @param fields must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: the time to live in the {@link TimeUnit} provided; or a negative value - * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. - * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @return a list of {@link Long} values for each of the fields provided: the time to live in the {@link TimeUnit} + * provided; or a negative value to signal an error. The command returns {@code -1} if the key exists but has + * no associated expiration time. The command returns {@code -2} if the key does not exist; {@literal null} + * when used in pipeline / transaction. * @see Redis Documentation: HTTL * @since 3.5 */ @@ -2527,9 +2529,10 @@ List hpExpireAt(String key, long unixTimeInMillis, Hash.FieldExpirationOpt * * @param key must not be {@literal null}. * @param fields must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: the time to live in milliseconds; or a negative value - * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. - * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @return a list of {@link Long} values for each of the fields provided: the time to live in milliseconds; or a + * negative value to signal an error. The command returns {@code -1} if the key exists but has no associated + * expiration time. 
The command returns {@code -2} if the key does not exist; {@literal null} when used in + * pipeline / transaction. * @see Redis Documentation: HTTL * @since 3.5 */ @@ -2759,8 +2762,7 @@ GeoResults> geoRadiusByMember(String key, String member, Dis /** * Return the members of a geo set which are within the borders of the area specified by a given {@link GeoShape - * shape}. The query's center point is provided by - * {@link GeoReference}. + * shape}. The query's center point is provided by {@link GeoReference}. * * @param key must not be {@literal null}. * @param reference must not be {@literal null}. @@ -2776,8 +2778,7 @@ GeoResults> geoSearch(String key, GeoReference refer /** * Query the members of a geo set which are within the borders of the area specified by a given {@link GeoShape shape} - * and store the result at {@code destKey}. The query's center point is provided by - * {@link GeoReference}. + * and store the result at {@code destKey}. The query's center point is provided by {@link GeoReference}. * * @param key must not be {@literal null}. * @param reference must not be {@literal null}. 
diff --git a/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java b/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java index 87e5f9735b..eb514f721c 100644 --- a/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java @@ -25,6 +25,7 @@ import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.core.types.Expiration; +import org.springframework.data.redis.core.types.Expirations; import org.springframework.lang.Nullable; /** diff --git a/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java b/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java index 2e9e5a2778..804617616f 100644 --- a/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java @@ -29,8 +29,9 @@ import org.springframework.core.convert.converter.Converter; import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.convert.Converters; -import org.springframework.data.redis.core.Expirations.Timeouts; import org.springframework.data.redis.core.types.Expiration; +import org.springframework.data.redis.core.types.Expirations; +import org.springframework.data.redis.core.types.Expirations.Timeouts; import org.springframework.lang.Nullable; import org.springframework.util.Assert; diff --git a/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java b/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java index 3a9f8aae1c..8c97c43fcd 100644 --- a/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java @@ -36,8 +36,9 @@ import 
org.springframework.data.redis.connection.ReactiveHashCommands.ExpireCommand; import org.springframework.data.redis.connection.ReactiveRedisConnection.NumericResponse; import org.springframework.data.redis.connection.convert.Converters; -import org.springframework.data.redis.core.Expirations.Timeouts; import org.springframework.data.redis.core.types.Expiration; +import org.springframework.data.redis.core.types.Expirations; +import org.springframework.data.redis.core.types.Expirations.Timeouts; import org.springframework.data.redis.serializer.RedisSerializationContext; import org.springframework.lang.Nullable; import org.springframework.util.Assert; diff --git a/src/main/java/org/springframework/data/redis/core/HashOperations.java b/src/main/java/org/springframework/data/redis/core/HashOperations.java index 89bf1ac599..336c108827 100644 --- a/src/main/java/org/springframework/data/redis/core/HashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/HashOperations.java @@ -25,6 +25,7 @@ import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.core.types.Expiration; +import org.springframework.data.redis.core.types.Expirations; import org.springframework.lang.Nullable; /** diff --git a/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java b/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java index aac3c6a642..757dbcea76 100644 --- a/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java @@ -28,6 +28,7 @@ import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.core.types.Expiration; +import org.springframework.data.redis.core.types.Expirations; import org.springframework.lang.Nullable; /** diff --git a/src/main/java/org/springframework/data/redis/core/Expirations.java 
b/src/main/java/org/springframework/data/redis/core/types/Expirations.java similarity index 89% rename from src/main/java/org/springframework/data/redis/core/Expirations.java rename to src/main/java/org/springframework/data/redis/core/types/Expirations.java index 11e66fc86f..e92722fefd 100644 --- a/src/main/java/org/springframework/data/redis/core/Expirations.java +++ b/src/main/java/org/springframework/data/redis/core/types/Expirations.java @@ -13,12 +13,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.springframework.data.redis.core; +package org.springframework.data.redis.core.types; import java.time.Duration; +import java.util.Collection; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; +import java.util.NoSuchElementException; import java.util.Objects; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -39,9 +41,10 @@ * * * @author Christoph Strobl + * @author Mark Paluch * @since 3.5 */ -public class Expirations { // TODO: should we move this to let's say Hash.class or another place +public class Expirations { private final TimeUnit unit; private final Map expirations; @@ -87,7 +90,7 @@ public Set persistent() { } /** - * @return an ordered set of keys that do not exists and therefore do not have a time to live. + * @return an ordered set of keys that do not exist and therefore do not have a time to live. */ public Set missing() { return filterByState(TimeToLive.MISSING); @@ -95,7 +98,7 @@ public Set missing() { /** * @return an ordered set of all {@link Expirations expirations} where the {@link TimeToLive#value()} is using the - * {@link TimeUnit} defined in {@link #precision()}. + * {@link TimeUnit} defined in {@link #timeUnit()}. 
*/ public List ttl() { return expirations.values().stream().map(it -> it.convert(this.unit)).toList(); @@ -104,7 +107,7 @@ public List ttl() { /** * @return the {@link TimeUnit} for {@link TimeToLive expirations} held by this instance. */ - public TimeUnit precision() { + public TimeUnit timeUnit() { return unit; } @@ -114,21 +117,30 @@ public TimeUnit precision() { * skipped. */ public List> expiring() { + return expirations.entrySet().stream().filter(it -> !it.getValue().isMissing() && !it.getValue().isPersistent()) .map(it -> Map.entry(it.getKey(), toDuration(it.getValue()))).toList(); } + /** + * @return the ordered collection of keys that are associated with an expiration. + */ + public Collection keys() { + return expirations.keySet(); + } + /** * @param key * @return the {@link Expirations expirations} where the {@link TimeToLive#value()} is using the {@link TimeUnit} - * defined in {@link #precision()} or {@literal null} if no entry could be found. + * defined in {@link #timeUnit()} or {@literal null} if no entry could be found. + * @throws NoSuchElementException if no expiration found for the given key. 
*/ - @Nullable public TimeToLive expirationOf(K key) { TimeToLive timeToLive = expirations.get(key); + if (timeToLive == null) { - return null; + throw new NoSuchElementException("No expiration found for key '%s'".formatted(key)); } return timeToLive.convert(this.unit); @@ -142,12 +154,7 @@ public TimeToLive expirationOf(K key) { */ @Nullable public Duration ttlOf(K key) { - - TimeToLive timeToLive = expirationOf(key); - if (timeToLive == null) { - return null; - } - return toDuration(timeToLive); + return toDuration(expirationOf(key)); } private Set filterByState(TimeToLive filter) { @@ -161,6 +168,7 @@ static Duration toDuration(TimeToLive timeToLive) { if (timeToLive.sourceUnit == null) { return null; } + return Duration.of(timeToLive.raw(), timeToLive.sourceUnit.toChronoUnit()); } @@ -173,6 +181,7 @@ Long get(int index) { public int size() { return raw.size(); } + } /** @@ -182,10 +191,10 @@ public int size() { * {@link #PERSISTENT} mark predefined states returned by Redis indicating a time to live value could not be retrieved * due to various reasons. */ - public static class TimeToLive { // TODO: is Expiry a better name for this type? + public static class TimeToLive { /** - * Predefined {@link TimeToLive} for a key that does not exists and therefore does not have a time to live. + * Predefined {@link TimeToLive} for a key that does not exist and therefore does not have a time to live. */ public static TimeToLive MISSING = new TimeToLive(-2L); @@ -194,9 +203,10 @@ public static class TimeToLive { // TODO: is Expiry a better name for this type? 
*/ public static TimeToLive PERSISTENT = new TimeToLive(-1L); + final @Nullable TimeUnit sourceUnit; + final @Nullable TimeUnit targetUnit; + private final long raw; - @Nullable TimeUnit sourceUnit; - @Nullable TimeUnit targetUnit; public TimeToLive(long value) { this(value, null); @@ -301,5 +311,7 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(raw); } + } + } diff --git a/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java b/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java index 8cc271d9f4..938b631e5d 100644 --- a/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java +++ b/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java @@ -27,11 +27,11 @@ import org.springframework.data.redis.connection.DataType; import org.springframework.data.redis.core.BoundHashOperations; import org.springframework.data.redis.core.Cursor; -import org.springframework.data.redis.core.Expirations; import org.springframework.data.redis.core.ExpireChanges; import org.springframework.data.redis.core.RedisOperations; import org.springframework.data.redis.core.ScanOptions; import org.springframework.data.redis.core.SessionCallback; +import org.springframework.data.redis.core.types.Expirations; import org.springframework.lang.Nullable; /** diff --git a/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java b/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java index 0ad2345454..98a592c932 100644 --- a/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java +++ b/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java @@ -23,8 +23,8 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; -import org.springframework.data.redis.core.Expirations; import org.springframework.data.redis.core.ExpireChanges; +import 
org.springframework.data.redis.core.types.Expirations; import org.springframework.lang.Nullable; /** diff --git a/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java b/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java index 0819414821..3ddf47f098 100644 --- a/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java +++ b/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java @@ -33,9 +33,9 @@ import org.springframework.data.redis.connection.DataType; import org.springframework.data.redis.core.BoundHashOperations; -import org.springframework.data.redis.core.Expirations; import org.springframework.data.redis.core.ExpireChanges; import org.springframework.data.redis.core.RedisOperations; +import org.springframework.data.redis.core.types.Expirations; import org.springframework.lang.Nullable; /** diff --git a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java index f34ee94e0b..79c16075e3 100644 --- a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java @@ -36,8 +36,8 @@ import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.jedis.JedisConnectionFactory; import org.springframework.data.redis.connection.jedis.extension.JedisConnectionFactoryExtension; -import org.springframework.data.redis.core.Expirations.TimeToLive; import org.springframework.data.redis.core.ExpireChanges.ExpiryChangeState; +import org.springframework.data.redis.core.types.Expirations.TimeToLive; import org.springframework.data.redis.test.condition.EnabledOnCommand; import org.springframework.data.redis.test.extension.RedisStanalone; 
import org.springframework.data.redis.test.extension.parametrized.MethodSource; @@ -230,7 +230,7 @@ void testExpireAndGetExpireMillis() { assertThat(redisTemplate.opsForHash().getTimeToLive(key, List.of(key1))).satisfies(expirations -> { assertThat(expirations.missing()).isEmpty(); - assertThat(expirations.precision()).isEqualTo(TimeUnit.SECONDS); + assertThat(expirations.timeUnit()).isEqualTo(TimeUnit.SECONDS); assertThat(expirations.expirationOf(key1)).extracting(TimeToLive::raw, InstanceOfAssertFactories.LONG) .isBetween(0L, 1L); assertThat(expirations.ttlOf(key1)).isBetween(Duration.ZERO, Duration.ofSeconds(1)); @@ -261,7 +261,7 @@ void testExpireAndGetExpireSeconds() { assertThat(redisTemplate.opsForHash().getTimeToLive(key, TimeUnit.SECONDS, List.of(key1, key2))) .satisfies(expirations -> { assertThat(expirations.missing()).isEmpty(); - assertThat(expirations.precision()).isEqualTo(TimeUnit.SECONDS); + assertThat(expirations.timeUnit()).isEqualTo(TimeUnit.SECONDS); assertThat(expirations.expirationOf(key1)).extracting(TimeToLive::raw, InstanceOfAssertFactories.LONG) .isBetween(0L, 5L); assertThat(expirations.ttlOf(key1)).isBetween(Duration.ofSeconds(1), Duration.ofSeconds(5)); @@ -286,7 +286,7 @@ void testExpireAtAndGetExpireMillis() { assertThat(redisTemplate.opsForHash().getTimeToLive(key, TimeUnit.MILLISECONDS, List.of(key1, key2))) .satisfies(expirations -> { assertThat(expirations.missing()).isEmpty(); - assertThat(expirations.precision()).isEqualTo(TimeUnit.MILLISECONDS); + assertThat(expirations.timeUnit()).isEqualTo(TimeUnit.MILLISECONDS); assertThat(expirations.expirationOf(key1)).extracting(TimeToLive::raw, InstanceOfAssertFactories.LONG) .isBetween(0L, 500L); assertThat(expirations.ttlOf(key1)).isBetween(Duration.ZERO, Duration.ofMillis(500)); diff --git a/src/test/java/org/springframework/data/redis/core/ExpirationsUnitTest.java b/src/test/java/org/springframework/data/redis/core/types/ExpirationsUnitTest.java similarity index 91% rename from 
src/test/java/org/springframework/data/redis/core/ExpirationsUnitTest.java rename to src/test/java/org/springframework/data/redis/core/types/ExpirationsUnitTest.java index 33e16f6c6d..2b310ded92 100644 --- a/src/test/java/org/springframework/data/redis/core/ExpirationsUnitTest.java +++ b/src/test/java/org/springframework/data/redis/core/types/ExpirationsUnitTest.java @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.springframework.data.redis.core; +package org.springframework.data.redis.core.types; import static org.assertj.core.api.Assertions.*; @@ -27,11 +27,12 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; -import org.springframework.data.redis.core.Expirations.Timeouts; +import org.springframework.data.redis.core.types.Expirations.Timeouts; /** + * Unit test for {@link Expirations} + * * @author Christoph Strobl - * @since 2025/02 */ class ExpirationsUnitTest { @@ -39,7 +40,7 @@ class ExpirationsUnitTest { static final String KEY_2 = "key-2"; static final String KEY_3 = "key-3"; - @ParameterizedTest + @ParameterizedTest // GH-3054 @EnumSource(TimeUnit.class) void expirationMemorizesSourceUnit(TimeUnit targetUnit) { @@ -51,7 +52,7 @@ void expirationMemorizesSourceUnit(TimeUnit targetUnit) { }); } - @Test + @Test // GH-3054 void expirationsCategorizesElements() { Expirations exp = createExpirations(new Timeouts(TimeUnit.SECONDS, List.of(-2L, -1L, 120L))); @@ -61,7 +62,7 @@ void expirationsCategorizesElements() { assertThat(exp.expiring()).containsExactly(Map.entry(KEY_3, Duration.ofMinutes(2))); } - @Test + @Test // GH-3054 void returnsNullForMissingElements() { Expirations exp = createExpirations(new Timeouts(TimeUnit.SECONDS, List.of(-2L, -1L, 120L))); @@ -70,7 +71,7 @@ void returnsNullForMissingElements() { assertThat(exp.ttlOf("missing")).isNull(); } - @Test + @Test // GH-3054 void 
ttlReturnsDurationForEntriesWithTimeout() { Expirations exp = createExpirations(new Timeouts(TimeUnit.SECONDS, List.of(-2L, -1L, 120L))); @@ -78,7 +79,7 @@ void ttlReturnsDurationForEntriesWithTimeout() { assertThat(exp.ttlOf(KEY_3)).isEqualTo(Duration.ofMinutes(2)); } - @Test + @Test // GH-3054 void ttlReturnsNullForPersistentAndMissingEntries() { Expirations exp = createExpirations(new Timeouts(TimeUnit.SECONDS, List.of(-2L, -1L, 120L))); From d7502409f451f9ff86f40e32196f05912fb22091 Mon Sep 17 00:00:00 2001 From: Mark Paluch Date: Tue, 4 Mar 2025 12:36:01 +0100 Subject: [PATCH 09/13] Polishing. Extract BoundHashFieldExpirationOperations to provide a fluent way of interacting with expirations on BoundHashOps and RedisMap. --- .../data/redis/connection/Hash.java | 8 ++ .../BoundHashFieldExpirationOperations.java | 126 ++++++++++++++++++ .../data/redis/core/BoundHashOperations.java | 126 ++++++------------ ...ultBoundHashFieldExpirationOperations.java | 93 +++++++++++++ .../data/redis/core/HashOperations.java | 37 +++++ .../data/redis/core/types/Expirations.java | 44 ++++-- .../support/collections/DefaultRedisMap.java | 28 +--- .../redis/support/collections/RedisMap.java | 85 +++--------- .../support/collections/RedisProperties.java | 40 +----- ...DefaultHashOperationsIntegrationTests.java | 32 +++++ .../redis/core/types/ExpirationsUnitTest.java | 9 ++ .../AbstractRedisMapIntegrationTests.java | 23 ++-- .../RedisPropertiesIntegrationTests.java | 6 - 13 files changed, 415 insertions(+), 242 deletions(-) create mode 100644 src/main/java/org/springframework/data/redis/core/BoundHashFieldExpirationOperations.java create mode 100644 src/main/java/org/springframework/data/redis/core/DefaultBoundHashFieldExpirationOperations.java diff --git a/src/main/java/org/springframework/data/redis/connection/Hash.java b/src/main/java/org/springframework/data/redis/connection/Hash.java index dcf38d99e5..51e326dd2b 100644 --- 
a/src/main/java/org/springframework/data/redis/connection/Hash.java +++ b/src/main/java/org/springframework/data/redis/connection/Hash.java @@ -21,11 +21,16 @@ import org.springframework.util.ObjectUtils; /** + * Types for interacting with Hash data structures. + * * @author Christoph Strobl * @since 3.5 */ public interface Hash { + /** + * Expiration options for Hash Expiation updates. + */ class FieldExpirationOptions { private static final FieldExpirationOptions NONE = new FieldExpirationOptions(Condition.ALWAYS); @@ -124,6 +129,9 @@ public enum Condition { * Set expiration only when the new expiration is greater than current one. */ LT + } + } + } diff --git a/src/main/java/org/springframework/data/redis/core/BoundHashFieldExpirationOperations.java b/src/main/java/org/springframework/data/redis/core/BoundHashFieldExpirationOperations.java new file mode 100644 index 0000000000..33e0ff82e6 --- /dev/null +++ b/src/main/java/org/springframework/data/redis/core/BoundHashFieldExpirationOperations.java @@ -0,0 +1,126 @@ +/* + * Copyright 2025 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.redis.core; + +import java.time.Duration; +import java.time.Instant; +import java.util.concurrent.TimeUnit; + +import org.springframework.data.redis.connection.Hash; +import org.springframework.data.redis.core.types.Expiration; +import org.springframework.data.redis.core.types.Expirations; +import org.springframework.lang.Nullable; + +/** + * Hash Field Expiration operations bound to a certain hash key and set of hash fields. + * + * @param type of the hash field names. + * @author Mark Paluch + * @since 3.5 + */ +public interface BoundHashFieldExpirationOperations { + + /** + * Apply {@link Expiration} to the hash without any additional constraints. + * + * @param expiration the expiration definition. + * @return changes to the hash fields. + */ + default ExpireChanges expire(Expiration expiration) { + return expire(expiration, Hash.FieldExpirationOptions.none()); + } + + /** + * Apply {@link Expiration} to the hash fields given {@link Hash.FieldExpirationOptions expiration options}. + * + * @param expiration the expiration definition. + * @param options expiration options. + * @return changes to the hash fields. + */ + ExpireChanges expire(Expiration expiration, Hash.FieldExpirationOptions options); + + /** + * Set time to live for given {@code hashKey}. + * + * @param timeout the amount of time after which the key will be expired, must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / + * transaction. + * @throws IllegalArgumentException if the timeout is {@literal null}. 
+ * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + ExpireChanges expire(Duration timeout); + + /** + * Set the expiration for given {@code hashKey} as a {@literal date} timestamp. + * + * @param expireAt must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | + * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in + * pipeline / transaction. + * @throws IllegalArgumentException if the instant is {@literal null} or too large to represent as a {@code Date}. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + ExpireChanges expireAt(Instant expireAt); + + /** + * Remove the expiration from given {@code hashKey} . + * + * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is + * removed; {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such + * field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPERSIST + * @since 3.5 + */ + @Nullable + ExpireChanges persist(); + + /** + * Get the time to live for {@code hashKey} in seconds. + * + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative + * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration + * time. The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / + * transaction. 
+ * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + Expirations getTimeToLive(); + + /** + * Get the time to live for {@code hashKey} and convert it to the given {@link TimeUnit}. + * + * @param timeUnit must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative + * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration + * time. The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / + * transaction. + * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + Expirations getTimeToLive(TimeUnit timeUnit); + +} diff --git a/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java b/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java index eb514f721c..0d287e929f 100644 --- a/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java @@ -15,17 +15,12 @@ */ package org.springframework.data.redis.core; -import java.time.Duration; -import java.time.Instant; +import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.TimeUnit; -import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; -import org.springframework.data.redis.core.types.Expiration; -import org.springframework.data.redis.core.types.Expirations; import org.springframework.lang.Nullable; /** @@ -160,88 +155,6 @@ public interface BoundHashOperations extends BoundKeyOperations { @Nullable Long lengthOfValue(HK hashKey); - default ExpireChanges expire(Expiration expiration, Collection hashKeys) { - return expire(expiration, FieldExpirationOptions.none(), hashKeys); - } - - ExpireChanges expire(Expiration expiration, FieldExpirationOptions options, Collection 
hashKeys); - - /** - * Set time to live for given {@code hashKey} . - * - * @param timeout the amount of time after which the key will be expired, must not be {@literal null}. - * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is - * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time - * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition - * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / - * transaction. - * @throws IllegalArgumentException if the timeout is {@literal null}. - * @see Redis Documentation: HEXPIRE - * @since 3.5 - */ - @Nullable - ExpireChanges expire(Duration timeout, Collection hashKeys); - - /** - * Set the expiration for given {@code hashKey} as a {@literal date} timestamp. - * - * @param expireAt must not be {@literal null}. - * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is - * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating - * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | - * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in - * pipeline / transaction. - * @throws IllegalArgumentException if the instant is {@literal null} or too large to represent as a {@code Date}. - * @see Redis Documentation: HEXPIRE - * @since 3.5 - */ - @Nullable - ExpireChanges expireAt(Instant expireAt, Collection hashKeys); - - /** - * Remove the expiration from given {@code hashKey} . - * - * @param hashKeys must not be {@literal null}. 
- * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is - * removed; {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such - * field; {@literal null} when used in pipeline / transaction. - * @see Redis Documentation: HPERSIST - * @since 3.5 - */ - @Nullable - ExpireChanges persist(Collection hashKeys); - - /** - * Get the time to live for {@code hashKey} in seconds. - * - * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative - * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration - * time. The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / - * transaction. - * @see Redis Documentation: HTTL - * @since 3.5 - */ - @Nullable - Expirations getTimeToLive(Collection hashKeys); - - /** - * Get the time to live for {@code hashKey} and convert it to the given {@link TimeUnit}. - * - * @param timeUnit must not be {@literal null}. - * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative - * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration - * time. The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / - * transaction. - * @see Redis Documentation: HTTL - * @since 3.5 - */ - @Nullable - Expirations getTimeToLive(TimeUnit timeUnit, Collection hashKeys); - /** * Get size of hash at the bound key. 
* @@ -302,8 +215,45 @@ default ExpireChanges expire(Expiration expiration, Collection hashKeys) */ Cursor> scan(ScanOptions options); + /** + * Returns a bound operations object to perform operations on the hash field expiration for all hash fields at the + * bound {@code key}. Operations on the expiration object obtain keys at the time of invoking any expiration + * operation. + * + * @return the bound operations object to perform operations on the hash field expiration. + * @since 3.5 + */ + default BoundHashFieldExpirationOperations expiration() { + return new DefaultBoundHashFieldExpirationOperations<>(getOperations().opsForHash(), getKey(), this::keys); + } + + /** + * Returns a bound operations object to perform operations on the hash field expiration for all hash fields at the + * bound {@code key} for the given hash fields. + * + * @param hashFields collection of hash fields to operate on. + * @return the bound operations object to perform operations on the hash field expiration. + * @since 3.5 + */ + default BoundHashFieldExpirationOperations expiration(HK... hashFields) { + return expiration(Arrays.asList(hashFields)); + } + + /** + * Returns a bound operations object to perform operations on the hash field expiration for all hash fields at the + * bound {@code key} for the given hash fields. + * + * @param hashFields collection of hash fields to operate on. + * @return the bound operations object to perform operations on the hash field expiration. + * @since 3.5 + */ + default BoundHashFieldExpirationOperations expiration(Collection hashFields) { + return new DefaultBoundHashFieldExpirationOperations<>(getOperations().opsForHash(), getKey(), () -> hashFields); + } + /** * @return never {@literal null}. 
*/ RedisOperations getOperations(); + } diff --git a/src/main/java/org/springframework/data/redis/core/DefaultBoundHashFieldExpirationOperations.java b/src/main/java/org/springframework/data/redis/core/DefaultBoundHashFieldExpirationOperations.java new file mode 100644 index 0000000000..8dbabe6dd5 --- /dev/null +++ b/src/main/java/org/springframework/data/redis/core/DefaultBoundHashFieldExpirationOperations.java @@ -0,0 +1,93 @@ +/* + * Copyright 2025 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.redis.core; + +import java.time.Duration; +import java.time.Instant; +import java.util.Collection; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; + +import org.springframework.data.redis.connection.Hash; +import org.springframework.data.redis.core.types.Expiration; +import org.springframework.data.redis.core.types.Expirations; +import org.springframework.lang.Nullable; +import org.springframework.util.Assert; + +/** + * Default implementation of {@link BoundHashFieldExpirationOperations}. 
+ * + * @author Mark Paluch + * @since 3.5 + */ +class DefaultBoundHashFieldExpirationOperations implements BoundHashFieldExpirationOperations { + + private final HashOperations operations; + private final H key; + private final Supplier> hashFields; + + public DefaultBoundHashFieldExpirationOperations(HashOperations operations, H key, + Supplier> hashFields) { + + this.operations = operations; + this.key = key; + this.hashFields = hashFields; + } + + @Override + public ExpireChanges expire(Expiration expiration, Hash.FieldExpirationOptions options) { + return operations.expire(key, expiration, options, getHashKeys()); + } + + @Nullable + @Override + public ExpireChanges expire(Duration timeout) { + return operations.expire(key, timeout, getHashKeys()); + } + + @Nullable + @Override + public ExpireChanges expireAt(Instant expireAt) { + return operations.expireAt(key, expireAt, getHashKeys()); + } + + @Nullable + @Override + public ExpireChanges persist() { + return operations.persist(key, getHashKeys()); + } + + @Nullable + @Override + public Expirations getTimeToLive() { + return operations.getTimeToLive(key, getHashKeys()); + } + + @Nullable + @Override + public Expirations getTimeToLive(TimeUnit timeUnit) { + return operations.getTimeToLive(key, timeUnit, getHashKeys()); + } + + private Collection getHashKeys() { + + Collection hks = hashFields.get(); + + Assert.state(hks != null, "Hash keys must not be null"); + return hks; + } + +} diff --git a/src/main/java/org/springframework/data/redis/core/HashOperations.java b/src/main/java/org/springframework/data/redis/core/HashOperations.java index 336c108827..67c4ce0fa8 100644 --- a/src/main/java/org/springframework/data/redis/core/HashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/HashOperations.java @@ -17,6 +17,7 @@ import java.time.Duration; import java.time.Instant; +import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Map; @@ -330,6 +331,42 @@ 
default Expirations getTimeToLive(H key, Collection hashKeys) { @Nullable Expirations getTimeToLive(H key, TimeUnit timeUnit, Collection hashKeys); + /** + * Returns a bound operations object to perform operations on the hash field expiration for all hash fields at + * {@code key}. Operations on the expiration object obtain keys at the time of invoking any expiration operation. + * + * @param key must not be {@literal null}. + * @return the bound operations object to perform operations on the hash field expiration. + * @since 3.5 + */ + default BoundHashFieldExpirationOperations expiration(H key) { + return new DefaultBoundHashFieldExpirationOperations<>(this, key, () -> keys(key)); + } + + /** + * Returns a bound operations object to perform operations on the hash field expiration for all hash fields at + * {@code key} for the given hash fields. + * + * @param hashFields collection of hash fields to operate on. + * @return the bound operations object to perform operations on the hash field expiration. + * @since 3.5 + */ + default BoundHashFieldExpirationOperations expiration(H key, HK... hashFields) { + return expiration(key, Arrays.asList(hashFields)); + } + + /** + * Returns a bound operations object to perform operations on the hash field expiration for all hash fields at + * {@code key} for the given hash fields. + * + * @param hashFields collection of hash fields to operate on. + * @return the bound operations object to perform operations on the hash field expiration. + * @since 3.5 + */ + default BoundHashFieldExpirationOperations expiration(H key, Collection hashFields) { + return new DefaultBoundHashFieldExpirationOperations<>(this, key, () -> hashFields); + } + /** * @return never {@literal null}. 
*/ diff --git a/src/main/java/org/springframework/data/redis/core/types/Expirations.java b/src/main/java/org/springframework/data/redis/core/types/Expirations.java index e92722fefd..7101cb60fa 100644 --- a/src/main/java/org/springframework/data/redis/core/types/Expirations.java +++ b/src/main/java/org/springframework/data/redis/core/types/Expirations.java @@ -20,7 +20,6 @@ import java.util.LinkedHashSet; import java.util.List; import java.util.Map; -import java.util.NoSuchElementException; import java.util.Objects; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -133,14 +132,14 @@ public Collection keys() { * @param key * @return the {@link Expirations expirations} where the {@link TimeToLive#value()} is using the {@link TimeUnit} * defined in {@link #timeUnit()} or {@literal null} if no entry could be found. - * @throws NoSuchElementException if no expiration found for the given key. */ + @Nullable public TimeToLive expirationOf(K key) { TimeToLive timeToLive = expirations.get(key); if (timeToLive == null) { - throw new NoSuchElementException("No expiration found for key '%s'".formatted(key)); + return null; } return timeToLive.convert(this.unit); @@ -163,15 +162,21 @@ private Set filterByState(TimeToLive filter) { } @Nullable - static Duration toDuration(TimeToLive timeToLive) { + static Duration toDuration(@Nullable TimeToLive timeToLive) { - if (timeToLive.sourceUnit == null) { + if (timeToLive == null || timeToLive.sourceUnit == null) { return null; } return Duration.of(timeToLive.raw(), timeToLive.sourceUnit.toChronoUnit()); } + /** + * Collection of timeouts associated with a {@link TimeUnit}. 
+ * + * @param timeUnit + * @param raw + */ public record Timeouts(TimeUnit timeUnit, List raw) { Long get(int index) { @@ -203,20 +208,19 @@ public static class TimeToLive { */ public static TimeToLive PERSISTENT = new TimeToLive(-1L); - final @Nullable TimeUnit sourceUnit; - final @Nullable TimeUnit targetUnit; - + private final @Nullable TimeUnit sourceUnit; + private final @Nullable TimeUnit targetUnit; private final long raw; - public TimeToLive(long value) { + TimeToLive(long value) { this(value, null); } - public TimeToLive(long value, @Nullable TimeUnit sourceUnit) { + TimeToLive(long value, @Nullable TimeUnit sourceUnit) { this(value, sourceUnit, null); } - public TimeToLive(long value, @Nullable TimeUnit sourceUnit, @Nullable TimeUnit targetUnit) { + TimeToLive(long value, @Nullable TimeUnit sourceUnit, @Nullable TimeUnit targetUnit) { this.raw = value; this.sourceUnit = sourceUnit; this.targetUnit = targetUnit; @@ -226,9 +230,11 @@ public TimeToLive(long value, @Nullable TimeUnit sourceUnit, @Nullable TimeUnit * Factory method for creating {@link TimeToLive} instances, returning predefined ones if the value matches a known * reserved state. * + * @param value the TTL value. + * @param timeUnit time unit for the given value. * @return the {@link TimeToLive} for the given raw value. */ - static TimeToLive of(Number value, TimeUnit timeUnit) { + public static TimeToLive of(Number value, TimeUnit timeUnit) { return switch (value.intValue()) { case -2 -> MISSING; @@ -276,18 +282,19 @@ public TimeToLive convert(TimeUnit timeUnit) { * @return {@literal true} if key exists but does not expire. */ public boolean isPersistent() { - return PERSISTENT.equals(this); + return PERSISTENT.raw() == raw(); } /** * @return {@literal true} if key does not exist and therefore does not have a time to live. 
*/ public boolean isMissing() { - return MISSING.equals(this); + return MISSING.raw() == raw(); } @Override public boolean equals(Object o) { + if (o == this) { return true; } @@ -312,6 +319,15 @@ public int hashCode() { return Objects.hash(raw); } + @Override + public String toString() { + + return switch ((int) raw()) { + case -2 -> "MISSING"; + case -1 -> "PERSISTENT"; + default -> "%d %s".formatted(raw(), sourceUnit); + }; + } } } diff --git a/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java b/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java index 938b631e5d..18c3c24e20 100644 --- a/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java +++ b/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java @@ -15,8 +15,6 @@ */ package org.springframework.data.redis.support.collections; -import java.time.Duration; -import java.time.Instant; import java.util.Collection; import java.util.Collections; import java.util.Date; @@ -25,13 +23,12 @@ import java.util.concurrent.TimeUnit; import org.springframework.data.redis.connection.DataType; +import org.springframework.data.redis.core.BoundHashFieldExpirationOperations; import org.springframework.data.redis.core.BoundHashOperations; import org.springframework.data.redis.core.Cursor; -import org.springframework.data.redis.core.ExpireChanges; import org.springframework.data.redis.core.RedisOperations; import org.springframework.data.redis.core.ScanOptions; import org.springframework.data.redis.core.SessionCallback; -import org.springframework.data.redis.core.types.Expirations; import org.springframework.lang.Nullable; /** @@ -327,28 +324,13 @@ public Cursor> scan() { } @Override - public ExpireChanges expire(Duration timeout, Collection hashKeys) { - return hashOps.expire(timeout, hashKeys); + public BoundHashFieldExpirationOperations expiration() { + return hashOps.expiration(); } @Override - 
public ExpireChanges expireAt(Instant expireAt, Collection hashKeys) { - return hashOps.expireAt(expireAt, hashKeys); - } - - @Override - public ExpireChanges persist(Collection hashKeys) { - return hashOps.persist(hashKeys); - } - - @Override - public Expirations getTimeToLive(Collection hashKeys) { - return hashOps.getTimeToLive(hashKeys); - } - - @Override - public Expirations getTimeToLive(TimeUnit timeUnit, Collection hashKeys) { - return hashOps.getTimeToLive(timeUnit, hashKeys); + public BoundHashFieldExpirationOperations expiration(Collection hashFields) { + return hashOps.expiration(hashFields); } private void checkResult(@Nullable Object obj) { diff --git a/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java b/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java index 98a592c932..955942eb25 100644 --- a/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java +++ b/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java @@ -15,16 +15,13 @@ */ package org.springframework.data.redis.support.collections; -import java.time.Duration; -import java.time.Instant; +import java.util.Arrays; import java.util.Collection; import java.util.Iterator; import java.util.Map; import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; -import org.springframework.data.redis.core.ExpireChanges; -import org.springframework.data.redis.core.types.Expirations; +import org.springframework.data.redis.core.BoundHashFieldExpirationOperations; import org.springframework.lang.Nullable; /** @@ -33,6 +30,7 @@ * @author Costin Leau * @author Christoph Strobl * @author Tihomi Mateev + * @author Mark Paluch */ public interface RedisMap extends RedisStore, ConcurrentMap { @@ -80,79 +78,34 @@ public interface RedisMap extends RedisStore, ConcurrentMap { Iterator> scan(); /** - * Set time to live for given {@code hashKeys}. 
+ * Returns a bound operations object to perform operations on the hash field expiration for all hash fields at + * {@code key}. Operations on the expiration object obtain keys at the time of invoking any expiration operation. * - * @param timeout the amount of time after which the key will be expired, must not be {@literal null}. - * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is - * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time - * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition - * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / - * transaction. - * @throws IllegalArgumentException if the timeout is {@literal null}. - * @see Redis Documentation: HEXPIRE + * @return the bound operations object to perform operations on the hash field expiration. * @since 3.5 */ - @Nullable - ExpireChanges expire(Duration timeout, Collection hashKeys); - - /** - * Set the expiration for given {@code hashKeys} as a {@literal date} timestamp. - * - * @param expireAt must not be {@literal null}. - * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is - * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating - * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | - * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in - * pipeline / transaction. - * @throws IllegalArgumentException if the instant is {@literal null} or too large to represent as a {@code Date}. 
- * @see Redis Documentation: HEXPIRE - * @since 3.5 - */ - @Nullable - ExpireChanges expireAt(Instant expireAt, Collection hashKeys); + BoundHashFieldExpirationOperations expiration(); /** - * Remove the expiration from given {@code hashKeys}. + * Returns a bound operations object to perform operations on the hash field expiration for all hash fields at the + * bound {@code key} for the given hash fields. * - * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is - * removed; {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such - * field; {@literal null} when used in pipeline / transaction. - * @see Redis Documentation: HPERSIST + * @param hashFields collection of hash fields to operate on. + * @return the bound operations object to perform operations on the hash field expiration. * @since 3.5 */ - @Nullable - ExpireChanges persist(Collection hashKeys); + default BoundHashFieldExpirationOperations expiration(K... hashFields) { + return expiration(Arrays.asList(hashFields)); + } /** - * Get the time to live for {@code hashKeys} in seconds. + * Returns a bound operations object to perform operations on the hash field expiration for all hash fields at the + * bound {@code key} for the given hash fields. * - * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative - * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration - * time. The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / - * transaction. - * @see Redis Documentation: HTTL + * @param hashFields collection of hash fields to operate on. + * @return the bound operations object to perform operations on the hash field expiration. 
* @since 3.5 */ - @Nullable - Expirations getTimeToLive(Collection hashKeys); - - /** - * Get the time to live for {@code hashKeys} and convert it to the given {@link TimeUnit}. - * - * @param timeUnit must not be {@literal null}. - * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative - * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration - * time. The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / - * transaction. - * @see Redis Documentation: HTTL - * @since 3.5 - */ - @Nullable - Expirations getTimeToLive(TimeUnit timeUnit, Collection hashKeys); + BoundHashFieldExpirationOperations expiration(Collection hashFields); } diff --git a/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java b/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java index 3ddf47f098..c6a5f0a45d 100644 --- a/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java +++ b/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java @@ -17,8 +17,6 @@ import java.io.IOException; import java.io.OutputStream; -import java.time.Duration; -import java.time.Instant; import java.util.Collection; import java.util.Collections; import java.util.Date; @@ -32,10 +30,9 @@ import java.util.concurrent.TimeUnit; import org.springframework.data.redis.connection.DataType; +import org.springframework.data.redis.core.BoundHashFieldExpirationOperations; import org.springframework.data.redis.core.BoundHashOperations; -import org.springframework.data.redis.core.ExpireChanges; import org.springframework.data.redis.core.RedisOperations; -import org.springframework.data.redis.core.types.Expirations; import org.springframework.lang.Nullable; /** @@ -306,42 +303,17 @@ public synchronized void 
storeToXML(OutputStream os, String comment) throws IOEx @Override public Iterator> scan() { - throw new UnsupportedOperationException(); - } - - @Override - public ExpireChanges expire(Duration timeout, Collection hashKeys) { - - Collection keys = hashKeys.stream().map(key -> (String) key).toList(); - return (ExpireChanges) hashOps.expire(timeout, keys); + return (Iterator) delegate.scan(); } @Override - public ExpireChanges expireAt(Instant expireAt, Collection hashKeys) { - - Collection keys = hashKeys.stream().map(key -> (String) key).toList(); - return (ExpireChanges) hashOps.expireAt(expireAt, keys); + public BoundHashFieldExpirationOperations expiration() { + return (BoundHashFieldExpirationOperations) delegate.expiration(); } @Override - public ExpireChanges persist(Collection hashKeys) { - - Collection keys = hashKeys.stream().map(key -> (String) key).toList(); - return (ExpireChanges) hashOps.persist(keys); - } - - @Override - public Expirations getTimeToLive(Collection hashKeys) { - - Collection keys = hashKeys.stream().map(key -> (String) key).toList(); - return (Expirations) hashOps.getTimeToLive(keys); - } - - @Override - public Expirations getTimeToLive(TimeUnit timeUnit, Collection hashKeys) { - - Collection keys = hashKeys.stream().map(key -> (String) key).toList(); - return (Expirations) hashOps.getTimeToLive(timeUnit, keys); + public BoundHashFieldExpirationOperations expiration(Collection hashFields) { + return (BoundHashFieldExpirationOperations) delegate.expiration((Collection) hashFields); } } diff --git a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java index 79c16075e3..6fd9671f27 100644 --- a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java @@ -268,6 +268,38 @@ 
void testExpireAndGetExpireSeconds() { }); } + @ParameterizedRedisTest + @EnabledOnCommand("HEXPIRE") + void testBoundExpireAndGetExpireSeconds() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + hashOps.put(key, key1, val1); + hashOps.put(key, key2, val2); + + BoundHashOperations hashOps = redisTemplate.boundHashOps(key); + BoundHashFieldExpirationOperations exp = hashOps.expiration(key1, key2); + + assertThat(exp.expire(Duration.ofSeconds(5))).satisfies(changes -> { + assertThat(changes.allOk()).isTrue(); + assertThat(changes.stateOf(key1)).isEqualTo(ExpiryChangeState.OK); + assertThat(changes.ok()).containsExactlyInAnyOrder(key1, key2); + assertThat(changes.missed()).isEmpty(); + assertThat(changes.stateChanges()).map(ExpiryChangeState::value).containsExactly(1L, 1L); + }); + + assertThat(exp.getTimeToLive(TimeUnit.SECONDS)).satisfies(expirations -> { + assertThat(expirations.missing()).isEmpty(); + assertThat(expirations.timeUnit()).isEqualTo(TimeUnit.SECONDS); + assertThat(expirations.expirationOf(key1)).extracting(TimeToLive::raw, InstanceOfAssertFactories.LONG) + .isBetween(0L, 5L); + assertThat(expirations.ttlOf(key1)).isBetween(Duration.ofSeconds(1), Duration.ofSeconds(5)); + }); + } + @ParameterizedRedisTest @EnabledOnCommand("HEXPIRE") void testExpireAtAndGetExpireMillis() { diff --git a/src/test/java/org/springframework/data/redis/core/types/ExpirationsUnitTest.java b/src/test/java/org/springframework/data/redis/core/types/ExpirationsUnitTest.java index 2b310ded92..db5ba98605 100644 --- a/src/test/java/org/springframework/data/redis/core/types/ExpirationsUnitTest.java +++ b/src/test/java/org/springframework/data/redis/core/types/ExpirationsUnitTest.java @@ -33,6 +33,7 @@ * Unit test for {@link Expirations} * * @author Christoph Strobl + * @author Mark Paluch */ class ExpirationsUnitTest { @@ -88,6 +89,14 @@ 
void ttlReturnsNullForPersistentAndMissingEntries() { assertThat(exp.ttlOf(KEY_2)).isNull(); } + @Test // GH-3054 + void shouldRenderToString() { + + assertThat(Expirations.TimeToLive.PERSISTENT).hasToString("PERSISTENT"); + assertThat(Expirations.TimeToLive.MISSING).hasToString("MISSING"); + assertThat(Expirations.TimeToLive.of(1, TimeUnit.SECONDS)).hasToString("1 SECONDS"); + } + static Expirations createExpirations(Timeouts timeouts) { List keys = IntStream.range(1, timeouts.raw().size() + 1).mapToObj("key-%s"::formatted).toList(); diff --git a/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java b/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java index 728258ade8..0a03b7340e 100644 --- a/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java @@ -15,7 +15,7 @@ */ package org.springframework.data.redis.support.collections; -import static org.assertj.core.api.Assertions.*; +import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.*; import static org.assertj.core.api.Assumptions.*; import java.io.IOException; @@ -42,6 +42,7 @@ import org.springframework.data.redis.ObjectFactory; import org.springframework.data.redis.RawObjectFactory; import org.springframework.data.redis.RedisSystemException; +import org.springframework.data.redis.core.BoundHashFieldExpirationOperations; import org.springframework.data.redis.core.Cursor; import org.springframework.data.redis.core.ExpireChanges; import org.springframework.data.redis.core.RedisCallback; @@ -204,15 +205,15 @@ void testExpire() { V v1 = getValue(); assertThat(map.put(k1, v1)).isEqualTo(null); - Collection keys = Collections.singletonList(k1); - assertThat(map.expire(Duration.ofSeconds(5), keys)).satisfies(ExpireChanges::allOk); - 
assertThat(map.getTimeToLive(keys)).satisfies(expiration -> { + BoundHashFieldExpirationOperations ops = map.expiration(Collections.singletonList(k1)); + assertThat(ops.expire(Duration.ofSeconds(5))).satisfies(ExpireChanges::allOk); + assertThat(ops.getTimeToLive()).satisfies(expiration -> { assertThat(expiration.expirationOf(k1).raw()).isBetween(1L, 5L); }); - assertThat(map.getTimeToLive(TimeUnit.MILLISECONDS, keys)).satisfies(expiration -> { + assertThat(ops.getTimeToLive(TimeUnit.MILLISECONDS)).satisfies(expiration -> { assertThat(expiration.expirationOf(k1).raw()).isBetween(1000L, 5000L); }); - assertThat(map.persist(keys)).satisfies(ExpireChanges::allOk); + assertThat(ops.persist()).satisfies(ExpireChanges::allOk); } @ParameterizedRedisTest @@ -223,15 +224,15 @@ void testExpireAt() { V v1 = getValue(); assertThat(map.put(k1, v1)).isEqualTo(null); - Collection keys = Collections.singletonList(k1); - assertThat(map.expireAt(Instant.now().plusSeconds(5), keys)).satisfies(ExpireChanges::allOk); - assertThat(map.getTimeToLive(keys)).satisfies(expiration -> { + BoundHashFieldExpirationOperations ops = map.expiration(Collections.singletonList(k1)); + assertThat(ops.expireAt(Instant.now().plusSeconds(5))).satisfies(ExpireChanges::allOk); + assertThat(ops.getTimeToLive()).satisfies(expiration -> { assertThat(expiration.expirationOf(k1).raw()).isBetween(1L, 5L); }); - assertThat(map.getTimeToLive(TimeUnit.MILLISECONDS, keys)).satisfies(expiration -> { + assertThat(ops.getTimeToLive(TimeUnit.MILLISECONDS)).satisfies(expiration -> { assertThat(expiration.expirationOf(k1).raw()).isBetween(1000L, 5000L); }); - assertThat(map.persist(keys)).satisfies(ExpireChanges::allOk); + assertThat(ops.persist()).satisfies(ExpireChanges::allOk); } @ParameterizedRedisTest diff --git a/src/test/java/org/springframework/data/redis/support/collections/RedisPropertiesIntegrationTests.java b/src/test/java/org/springframework/data/redis/support/collections/RedisPropertiesIntegrationTests.java 
index 17e63c3113..be3e627a28 100644 --- a/src/test/java/org/springframework/data/redis/support/collections/RedisPropertiesIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/support/collections/RedisPropertiesIntegrationTests.java @@ -185,12 +185,6 @@ void testStringPropertyNames() throws Exception { assertThat(keys.contains(key3)).isTrue(); } - @ParameterizedRedisTest - @Override - public void testScanWorksCorrectly() { - assertThatExceptionOfType(UnsupportedOperationException.class).isThrownBy(() -> super.testScanWorksCorrectly()); - } - // DATAREDIS-241 public static Collection testParams() { From cdefb2eec3b11ccc3dfce8e7359cb1c495f29a8a Mon Sep 17 00:00:00 2001 From: Mark Paluch Date: Tue, 4 Mar 2025 14:24:59 +0100 Subject: [PATCH 10/13] Add Hash Field Expiration commands to TypeHints for Lettuce. --- .../antora/modules/ROOT/pages/appendix.adoc | 7 ++++ .../connection/lettuce/LettuceConnection.java | 8 ++++ .../data/redis/core/RedisCommand.java | 7 ++++ .../AbstractConnectionIntegrationTests.java | 42 ++++++++++++++++++- 4 files changed, 63 insertions(+), 1 deletion(-) diff --git a/src/main/antora/modules/ROOT/pages/appendix.adoc b/src/main/antora/modules/ROOT/pages/appendix.adoc index 46feff0611..96aaea4597 100644 --- a/src/main/antora/modules/ROOT/pages/appendix.adoc +++ b/src/main/antora/modules/ROOT/pages/appendix.adoc @@ -67,6 +67,13 @@ link:https://www.springframework.org/schema/redis/spring-redis-1.0.xsd[Spring Da |GETSET |X |HDEL |X |HEXISTS |X +|HEXPIRE |X +|HEXPIREAT |X +|HPEXPIRE |X +|HPEXPIREAT |X +|HPERSIST |X +|HTTL |X +|HPTTL |X |HGET |X |HGETALL |X |HINCRBY |X diff --git a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceConnection.java b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceConnection.java index fc8460d514..fe646b3a1b 100644 --- a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceConnection.java +++ 
b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceConnection.java @@ -1160,6 +1160,14 @@ static class TypeHints { COMMAND_OUTPUT_TYPE_MAPPING.put(PFMERGE, IntegerOutput.class); COMMAND_OUTPUT_TYPE_MAPPING.put(PFADD, IntegerOutput.class); + COMMAND_OUTPUT_TYPE_MAPPING.put(HEXPIRE, IntegerListOutput.class); + COMMAND_OUTPUT_TYPE_MAPPING.put(HEXPIREAT, IntegerListOutput.class); + COMMAND_OUTPUT_TYPE_MAPPING.put(HPEXPIRE, IntegerListOutput.class); + COMMAND_OUTPUT_TYPE_MAPPING.put(HPEXPIREAT, IntegerListOutput.class); + COMMAND_OUTPUT_TYPE_MAPPING.put(HPERSIST, IntegerListOutput.class); + COMMAND_OUTPUT_TYPE_MAPPING.put(HTTL, IntegerListOutput.class); + COMMAND_OUTPUT_TYPE_MAPPING.put(HPTTL, IntegerListOutput.class); + // DOUBLE COMMAND_OUTPUT_TYPE_MAPPING.put(HINCRBYFLOAT, DoubleOutput.class); COMMAND_OUTPUT_TYPE_MAPPING.put(INCRBYFLOAT, DoubleOutput.class); diff --git a/src/main/java/org/springframework/data/redis/core/RedisCommand.java b/src/main/java/org/springframework/data/redis/core/RedisCommand.java index e9303233df..53dc940a97 100644 --- a/src/main/java/org/springframework/data/redis/core/RedisCommand.java +++ b/src/main/java/org/springframework/data/redis/core/RedisCommand.java @@ -107,6 +107,13 @@ public enum RedisCommand { HSET("w", 3, 3), // HSETNX("w", 3, 3), // HVALS("r", 1, 1), // + HEXPIRE("w", 5), // + HEXPIREAT("w", 5), // + HPEXPIRE("w", 5), // + HPEXPIREAT("w", 5), // + HPERSIST("w", 4), // + HTTL("r", 4), // + HPTTL("r", 4), // // -- I INCR("rw", 1), // INCRBYFLOAT("rw", 2, 2), // diff --git a/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java b/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java index ffa5bcd100..94b8db7657 100644 --- a/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java @@ 
-98,6 +98,7 @@ import org.springframework.data.redis.test.condition.RedisDriver; import org.springframework.data.redis.test.util.HexStringUtils; import org.springframework.data.util.Streamable; +import org.springframework.util.ObjectUtils; /** * Base test class for AbstractConnection integration tests @@ -752,6 +753,45 @@ void testExecute() { assertThat(stringSerializer.deserialize((byte[]) getResults().get(1))).isEqualTo("bar"); } + @Test + @EnabledOnCommand("HEXPIRE") + void testExecuteHashFieldExpiration() { + + actual.add(connection.hSet("foo", "bar", "field")); + actual.add(connection.execute("HTTL", "foo", "FIELDS", "1", "bar")); + actual.add(connection.execute("HEXPIRE", "foo", "100", "NX", "FIELDS", "1", "bar")); + actual.add(connection.execute("HPERSIST", "foo", "FIELDS", "1", "bar")); + actual.add(connection.execute("HTTL", "foo", "FIELDS", "1", "bar")); + + List results = getResults(); + + assertThat(deserializeList(results, 1, stringSerializer)).containsOnly(-1L); + assertThat(deserializeList(results, 2, stringSerializer)).containsOnly(1L); + assertThat(deserializeList(results, 3, stringSerializer)).containsOnly(1L); + assertThat(deserializeList(results, 4, stringSerializer)).containsOnly(-1L); + } + + List deserializeList(List objects, int index, RedisSerializer serializer) { + + List result = new ArrayList<>(); + Object o = objects.get(index); + if (o instanceof List ls) { + for (Object obj : ls) { + + if (obj instanceof byte[]) { + result.add(serializer.deserialize((byte[]) obj)); + } else { + result.add(obj); + } + } + + return result; + } + + throw new IllegalArgumentException( + "Object at index " + index + " is not a list but " + ObjectUtils.nullSafeToString(o)); + } + @Test void testExecuteNoArgs() { @@ -3436,7 +3476,7 @@ void hStrLenReturnsZeroWhenKeyDoesNotExist() { @Test @EnabledOnCommand("HEXPIRE") public void hExpireReturnsSuccessAndSetsTTL() { - + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); 
actual.add(connection.hExpire("hash-hexpire", 5L, "key-2")); actual.add(connection.hTtl("hash-hexpire", "key-2")); From b346d714139bfbe47ab0c02817e377db039e4452 Mon Sep 17 00:00:00 2001 From: Mark Paluch Date: Tue, 4 Mar 2025 14:32:11 +0100 Subject: [PATCH 11/13] Polishing. --- .../antora/modules/ROOT/pages/appendix.adoc | 381 +++++++++--------- 1 file changed, 191 insertions(+), 190 deletions(-) diff --git a/src/main/antora/modules/ROOT/pages/appendix.adoc b/src/main/antora/modules/ROOT/pages/appendix.adoc index 96aaea4597..669bf82204 100644 --- a/src/main/antora/modules/ROOT/pages/appendix.adoc +++ b/src/main/antora/modules/ROOT/pages/appendix.adoc @@ -8,200 +8,201 @@ link:https://www.springframework.org/schema/redis/spring-redis-1.0.xsd[Spring Da [[supported-commands]] == Supported Commands + .Redis commands supported by `RedisTemplate` [width="50%",cols="<2,^1",options="header"] |========================================================= |Command |Template Support -|APPEND |X -|AUTH |X -|BGREWRITEAOF |X -|BGSAVE |X -|BITCOUNT |X -|BITFIELD |X -|BITOP |X -|BLPOP |X -|BRPOP |X -|BRPOPLPUSH |X -|CLIENT KILL |X -|CLIENT GETNAME |X -|CLIENT LIST |X -|CLIENT SETNAME |X -|CLUSTER SLOTS |- -|COMMAND |- -|COMMAND COUNT |- -|COMMAND GETKEYS |- -|COMMAND INFO |- -|CONFIG GET |X -|CONFIG RESETSTAT |X -|CONFIG REWRITE |- -|CONFIG SET |X -|DBSIZE |X -|DEBUG OBJECT |- -|DEBUG SEGFAULT |- -|DECR |X -|DECRBY |X -|DEL |X -|DISCARD |X -|DUMP |X -|ECHO |X -|EVAL |X -|EVALSHA |X -|EXEC |X -|EXISTS |X -|EXPIRE |X -|EXPIREAT |X -|FLUSHALL |X -|FLUSHDB |X -|GEOADD |X -|GEODIST |X -|GEOHASH |X -|GEOPOS |X -|GEORADIUS |X -|GEORADIUSBYMEMBER |X -|GEOSEARCH |X -|GEOSEARCHSTORE |X -|GET |X -|GETBIT |X -|GETRANGE |X -|GETSET |X -|HDEL |X -|HEXISTS |X -|HEXPIRE |X -|HEXPIREAT |X -|HPEXPIRE |X -|HPEXPIREAT |X -|HPERSIST |X -|HTTL |X -|HPTTL |X -|HGET |X -|HGETALL |X -|HINCRBY |X -|HINCRBYFLOAT |X -|HKEYS |X -|HLEN |X -|HMGET |X -|HMSET |X -|HSCAN |X -|HSET |X -|HSETNX |X -|HVALS |X -|INCR |X 
-|INCRBY |X -|INCRBYFLOAT |X -|INFO |X -|KEYS |X -|LASTSAVE |X -|LINDEX |X -|LINSERT |X -|LLEN |X -|LPOP |X -|LPUSH |X -|LPUSHX |X -|LRANGE |X -|LREM |X -|LSET |X -|LTRIM |X -|MGET |X -|MIGRATE |- -|MONITOR |- -|MOVE |X -|MSET |X -|MSETNX |X -|MULTI |X -|OBJECT |- -|PERSIST |X -|PEXIPRE |X -|PEXPIREAT |X -|PFADD |X -|PFCOUNT |X -|PFMERGE |X -|PING |X -|PSETEX |X -|PSUBSCRIBE |X -|PTTL |X -|PUBLISH |X -|PUBSUB |- -|PUBSUBSCRIBE |- -|QUIT |X -|RANDOMKEY |X -|RENAME |X -|RENAMENX |X -|REPLICAOF |X -|RESTORE |X -|ROLE |- -|RPOP |X -|RPOPLPUSH |X -|RPUSH |X -|RPUSHX |X -|SADD |X -|SAVE |X -|SCAN |X -|SCARD |X -|SCRIPT EXITS |X -|SCRIPT FLUSH |X -|SCRIPT KILL |X -|SCRIPT LOAD |X -|SDIFF |X -|SDIFFSTORE |X -|SELECT |X -|SENTINEL FAILOVER |X +|APPEND |X +|AUTH |X +|BGREWRITEAOF |X +|BGSAVE |X +|BITCOUNT |X +|BITFIELD |X +|BITOP |X +|BLPOP |X +|BRPOP |X +|BRPOPLPUSH |X +|CLIENT KILL |X +|CLIENT GETNAME |X +|CLIENT LIST |X +|CLIENT SETNAME |X +|CLUSTER SLOTS |- +|COMMAND |- +|COMMAND COUNT |- +|COMMAND GETKEYS |- +|COMMAND INFO |- +|CONFIG GET |X +|CONFIG RESETSTAT |X +|CONFIG REWRITE |- +|CONFIG SET |X +|DBSIZE |X +|DEBUG OBJECT |- +|DEBUG SEGFAULT |- +|DECR |X +|DECRBY |X +|DEL |X +|DISCARD |X +|DUMP |X +|ECHO |X +|EVAL |X +|EVALSHA |X +|EXEC |X +|EXISTS |X +|EXPIRE |X +|EXPIREAT |X +|FLUSHALL |X +|FLUSHDB |X +|GEOADD |X +|GEODIST |X +|GEOHASH |X +|GEOPOS |X +|GEORADIUS |X +|GEORADIUSBYMEMBER |X +|GEOSEARCH |X +|GEOSEARCHSTORE |X +|GET |X +|GETBIT |X +|GETRANGE |X +|GETSET |X +|HDEL |X +|HEXISTS |X +|HEXPIRE |X +|HEXPIREAT |X +|HPEXPIRE |X +|HPEXPIREAT |X +|HPERSIST |X +|HTTL |X +|HPTTL |X +|HGET |X +|HGETALL |X +|HINCRBY |X +|HINCRBYFLOAT |X +|HKEYS |X +|HLEN |X +|HMGET |X +|HMSET |X +|HSCAN |X +|HSET |X +|HSETNX |X +|HVALS |X +|INCR |X +|INCRBY |X +|INCRBYFLOAT |X +|INFO |X +|KEYS |X +|LASTSAVE |X +|LINDEX |X +|LINSERT |X +|LLEN |X +|LPOP |X +|LPUSH |X +|LPUSHX |X +|LRANGE |X +|LREM |X +|LSET |X +|LTRIM |X +|MGET |X +|MIGRATE |- +|MONITOR |- +|MOVE |X +|MSET |X +|MSETNX 
|X +|MULTI |X +|OBJECT |- +|PERSIST |X +|PEXIPRE |X +|PEXPIREAT |X +|PFADD |X +|PFCOUNT |X +|PFMERGE |X +|PING |X +|PSETEX |X +|PSUBSCRIBE |X +|PTTL |X +|PUBLISH |X +|PUBSUB |- +|PUBSUBSCRIBE |- +|QUIT |X +|RANDOMKEY |X +|RENAME |X +|RENAMENX |X +|REPLICAOF |X +|RESTORE |X +|ROLE |- +|RPOP |X +|RPOPLPUSH |X +|RPUSH |X +|RPUSHX |X +|SADD |X +|SAVE |X +|SCAN |X +|SCARD |X +|SCRIPT EXITS |X +|SCRIPT FLUSH |X +|SCRIPT KILL |X +|SCRIPT LOAD |X +|SDIFF |X +|SDIFFSTORE |X +|SELECT |X +|SENTINEL FAILOVER |X |SENTINEL GET-MASTER-ADD-BY-NAME |- -|SENTINEL MASTER | - -|SENTINEL MASTERS |X -|SENTINEL MONITOR |X -|SENTINEL REMOVE |X -|SENTINEL RESET |- -|SENTINEL SET |- -|SENTINEL SLAVES |X -|SET |X -|SETBIT |X -|SETEX |X -|SETNX |X -|SETRANGE |X -|SHUTDOWN |X -|SINTER |X -|SINTERSTORE |X -|SISMEMBER |X -|SLAVEOF |X -|SLOWLOG |- -|SMEMBERS |X -|SMOVE |X -|SORT |X -|SPOP |X -|SRANDMEMBER |X -|SREM |X -|SSCAN |X -|STRLEN |X -|SUBSCRIBE |X -|SUNION |X -|SUNIONSTORE |X -|SYNC |- -|TIME |X -|TTL |X -|TYPE |X -|UNSUBSCRIBE |X -|UNWATCH |X -|WATCH |X -|ZADD |X -|ZCARD |X -|ZCOUNT |X -|ZINCRBY |X -|ZINTERSTORE |X -|ZLEXCOUNT |- -|ZRANGE |X -|ZRANGEBYLEX |- -|ZREVRANGEBYLEX |- -|ZRANGEBYSCORE |X -|ZRANGESTORE |X -|ZRANK |X -|ZREM |X -|ZREMRANGEBYLEX |- -|ZREMRANGEBYRANK |X -|ZREVRANGE |X -|ZREVRANGEBYSCORE |X -|ZREVRANK |X -|ZSCAN |X -|ZSCORE |X -|ZUNINONSTORE |X +|SENTINEL MASTER | - +|SENTINEL MASTERS |X +|SENTINEL MONITOR |X +|SENTINEL REMOVE |X +|SENTINEL RESET |- +|SENTINEL SET |- +|SENTINEL SLAVES |X +|SET |X +|SETBIT |X +|SETEX |X +|SETNX |X +|SETRANGE |X +|SHUTDOWN |X +|SINTER |X +|SINTERSTORE |X +|SISMEMBER |X +|SLAVEOF |X +|SLOWLOG |- +|SMEMBERS |X +|SMOVE |X +|SORT |X +|SPOP |X +|SRANDMEMBER |X +|SREM |X +|SSCAN |X +|STRLEN |X +|SUBSCRIBE |X +|SUNION |X +|SUNIONSTORE |X +|SYNC |- +|TIME |X +|TTL |X +|TYPE |X +|UNSUBSCRIBE |X +|UNWATCH |X +|WATCH |X +|ZADD |X +|ZCARD |X +|ZCOUNT |X +|ZINCRBY |X +|ZINTERSTORE |X +|ZLEXCOUNT |- +|ZRANGE |X +|ZRANGEBYLEX |- +|ZREVRANGEBYLEX |- 
+|ZRANGEBYSCORE |X +|ZRANGESTORE |X +|ZRANK |X +|ZREM |X +|ZREMRANGEBYLEX |- +|ZREMRANGEBYRANK |X +|ZREVRANGE |X +|ZREVRANGEBYSCORE |X +|ZREVRANK |X +|ZSCAN |X +|ZSCORE |X +|ZUNINONSTORE |X |========================================================= From 552458da147ffc0bd6744e02b146015272349631 Mon Sep 17 00:00:00 2001 From: Mark Paluch Date: Tue, 4 Mar 2025 15:02:15 +0100 Subject: [PATCH 12/13] Add missing test guards. --- .../data/redis/core/DefaultHashOperationsIntegrationTests.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java index 6fd9671f27..bbb84a76de 100644 --- a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java @@ -326,6 +326,7 @@ void testExpireAtAndGetExpireMillis() { } @ParameterizedRedisTest + @EnabledOnCommand("HEXPIRE") void expireThrowsErrorOfNanoPrecision() { K key = keyFactory.instance(); @@ -336,6 +337,7 @@ void expireThrowsErrorOfNanoPrecision() { } @ParameterizedRedisTest + @EnabledOnCommand("HEXPIRE") void testExpireWithOptionsNone() { K key = keyFactory.instance(); From d9a213a6d8c50fc771556166d2a97a5210807d72 Mon Sep 17 00:00:00 2001 From: Christoph Strobl Date: Wed, 5 Mar 2025 11:59:20 +0100 Subject: [PATCH 13/13] Update javadoc and format sources. 
--- .../connection/ReactiveHashCommands.java | 38 +++++ .../redis/connection/RedisHashCommands.java | 55 ++++--- .../connection/StringRedisConnection.java | 20 +-- .../BoundHashFieldExpirationOperations.java | 29 +--- .../core/DefaultReactiveHashOperations.java | 6 +- .../data/redis/core/HashOperations.java | 32 +--- .../redis/core/ReactiveHashOperations.java | 43 ++++-- .../data/redis/core/RedisOperations.java | 1 - .../AbstractConnectionIntegrationTests.java | 96 +++++++----- .../jedis/JedisClusterConnectionTests.java | 137 ++++++++++++------ .../LettuceClusterConnectionTests.java | 105 ++++++++++---- ...eReactiveHashCommandsIntegrationTests.java | 48 ++---- ...DefaultHashOperationsIntegrationTests.java | 35 +++-- ...eactiveHashOperationsIntegrationTests.java | 56 +++---- .../AbstractRedisMapIntegrationTests.java | 10 +- 15 files changed, 424 insertions(+), 287 deletions(-) diff --git a/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java b/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java index 1e9fd94f3a..c463737747 100644 --- a/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java @@ -865,22 +865,52 @@ private ExpireCommand(@Nullable ByteBuffer key, List fields, Expirat this.options = options; } + /** + * Creates a new {@link ExpireCommand}. + * + * @param fields the {@code field} names to apply expiration to + * @param timeout the actual timeout + * @param unit the unit of measure for the {@code timeout}. + * @return new instance of {@link ExpireCommand}. + */ public static ExpireCommand expire(List fields, long timeout, TimeUnit unit) { Assert.notNull(fields, "Field must not be null"); return expire(fields, Expiration.from(timeout, unit)); } + /** + * Creates a new {@link ExpireCommand}. + * + * @param fields the {@code field} names to apply expiration to. 
+ * @param ttl the actual timeout. + * @return new instance of {@link ExpireCommand}. + */ public static ExpireCommand expire(List fields, Duration ttl) { Assert.notNull(fields, "Field must not be null"); return expire(fields, Expiration.from(ttl)); } + /** + * Creates a new {@link ExpireCommand}. + * + * @param fields the {@code field} names to apply expiration to + * @param expiration the {@link Expiration} to apply to the given {@literal fields}. + * @return new instance of {@link ExpireCommand}. + */ public static ExpireCommand expire(List fields, Expiration expiration) { return new ExpireCommand(null, fields, expiration, FieldExpirationOptions.none()); } + /** + * Creates a new {@link ExpireCommand}. + * + * @param fields the {@code field} names to apply expiration to + * @param ttl the unix point in time when to expire the given {@literal fields}. + * @param precision can be {@link TimeUnit#SECONDS} or {@link TimeUnit#MILLISECONDS}. + * @return new instance of {@link ExpireCommand}. + */ public static ExpireCommand expireAt(List fields, Instant ttl, TimeUnit precision) { if (precision.compareTo(TimeUnit.MILLISECONDS) > 0) { @@ -890,10 +920,18 @@ public static ExpireCommand expireAt(List fields, Instant ttl, TimeU return expire(fields, Expiration.unixTimestamp(ttl.toEpochMilli(), TimeUnit.MILLISECONDS)); } + /** + * @param key the {@literal key} from which to expire the {@literal fields} from. + * @return new instance of {@link ExpireCommand}. + */ public ExpireCommand from(ByteBuffer key) { return new ExpireCommand(key, getFields(), expiration, options); } + /** + * @param options additional options to be sent along with the command. + * @return new instance of {@link ExpireCommand}. 
+ */ public ExpireCommand withOptions(FieldExpirationOptions options) { return new ExpireCommand(getKey(), getFields(), getExpiration(), options); } diff --git a/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java b/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java index f2d736b8ef..d038708526 100644 --- a/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java @@ -255,11 +255,34 @@ public interface RedisHashCommands { @Nullable Long hStrLen(byte[] key, byte[] field); + /** + * Apply a given {@link org.springframework.data.redis.core.types.Expiration} to the given {@literal fields}. + * + * @param key must not be {@literal null}. + * @param expiration the {@link org.springframework.data.redis.core.types.Expiration} to apply. + * @param fields the names of the {@literal fields} to apply the {@literal expiration} to. + * @return a {@link List} holding the command result for each field in order - {@code 2} indicating the specific field + * is deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration + * time is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating there is no + * such field; + * @since 3.5 + */ default @Nullable List applyExpiration(byte[] key, org.springframework.data.redis.core.types.Expiration expiration, byte[]... fields) { return applyExpiration(key, expiration, FieldExpirationOptions.none(), fields); } + /** + * @param key must not be {@literal null}. + * @param expiration the {@link org.springframework.data.redis.core.types.Expiration} to apply. + * @param options additional options to be sent along with the command. + * @param fields the names of the {@literal fields} to apply the {@literal expiration} to. 
+ * @return a {@link List} holding the command result for each field in order - {@code 2} indicating the specific field + * is deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration + * time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT + * condition is not met); {@code -2} indicating there is no such field; + * @since 3.5 + */ @Nullable default List applyExpiration(byte[] key, org.springframework.data.redis.core.types.Expiration expiration, FieldExpirationOptions options, byte[]... fields) { @@ -304,9 +327,8 @@ default List applyExpiration(byte[] key, org.springframework.data.redis.co * @param fields must not be {@literal null}. * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time - * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition - * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / - * transaction. + * is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating there is no such + * field; {@literal null} when used in pipeline / transaction. * @see Redis Documentation: HEXPIRE * @since 3.5 */ @@ -324,9 +346,8 @@ default List hExpire(byte[] key, long seconds, byte[]... fields) { * @param fields must not be {@literal null}. 
* @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time - * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition - * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / - * transaction. + * is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating there is no such + * field; {@literal null} when used in pipeline / transaction. * @see Redis Documentation: HEXPIRE * @since 3.5 */ @@ -362,9 +383,8 @@ default List hExpire(byte[] key, Duration ttl, byte[]... fields) { * @param fields must not be {@literal null}. * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time - * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition - * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / - * transaction. + * is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating there is no + * such field; {@literal null} when used in pipeline / transaction. * @see Redis Documentation: HPEXPIRE * @since 3.5 */ @@ -382,9 +402,8 @@ default List hpExpire(byte[] key, long millis, byte[]... fields) { * @param fields must not be {@literal null}. 
* @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time - * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition - * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / - * transaction. + * is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating there is no such + * field; {@literal null} when used in pipeline / transaction. * @see Redis Documentation: HPEXPIRE * @since 3.5 */ @@ -420,9 +439,8 @@ default List hpExpire(byte[] key, Duration ttl, byte[]... fields) { * @param fields must not be {@literal null}. * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating - * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | - * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in - * pipeline / transaction. + * expiration time is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating + * there is no such field; {@literal null} when used in pipeline / transaction. * @see Redis Documentation: HEXPIREAT * @since 3.5 */ @@ -457,9 +475,8 @@ default List hExpireAt(byte[] key, long unixTime, byte[]... fields) { * @param fields must not be {@literal null}. 
* @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating - * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | - * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in - * pipeline / transaction. + * expiration time is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating + * there is no such field; {@literal null} when used in pipeline / transaction. * @see Redis Documentation: HPEXPIREAT * @since 3.5 */ @@ -531,8 +548,6 @@ List hpExpireAt(byte[] key, long unixTimeInMillis, FieldExpirationOptions. * @since 3.5 */ @Nullable - // TODO: this is complete nonsense as it would jeopardize negative values - // TODO: this should be a List> List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields); /** diff --git a/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java b/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java index 5ff0e9946f..1069e430c8 100644 --- a/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java +++ b/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java @@ -2341,9 +2341,8 @@ Long zRangeStoreRevByScore(String dstKey, String srcKey, * @param fields must not be {@literal null}. * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time - * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition - * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / - * transaction. 
+ * is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating there is no such + * field; {@literal null} when used in pipeline / transaction. * @see Redis Documentation: HEXPIRE * @since 3.5 */ @@ -2377,9 +2376,8 @@ default List hExpire(String key, long seconds, String... fields) { * @param fields must not be {@literal null}. * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time - * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition - * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / - * transaction. + * is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating there is no such + * field; {@literal null} when used in pipeline / transaction. * @see Redis Documentation: HPEXPIRE * @since 3.5 */ @@ -2413,9 +2411,8 @@ default List hpExpire(String key, long millis, String... fields) { * @param fields must not be {@literal null}. * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating - * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | - * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in - * pipeline / transaction. + * expiration time is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating + * there is no such field; {@literal null} when used in pipeline / transaction. * @see Redis Documentation: HEXPIREAT * @since 3.5 */ @@ -2449,9 +2446,8 @@ default List hExpireAt(String key, long unixTime, String... 
fields) { * @param fields must not be {@literal null}. * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating - * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | - * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in - * pipeline / transaction. + * expiration time is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating + * there is no such field; {@literal null} when used in pipeline / transaction. * @see Redis Documentation: HPEXPIREAT * @since 3.5 */ diff --git a/src/main/java/org/springframework/data/redis/core/BoundHashFieldExpirationOperations.java b/src/main/java/org/springframework/data/redis/core/BoundHashFieldExpirationOperations.java index 33e0ff82e6..27779a88a8 100644 --- a/src/main/java/org/springframework/data/redis/core/BoundHashFieldExpirationOperations.java +++ b/src/main/java/org/springframework/data/redis/core/BoundHashFieldExpirationOperations.java @@ -37,7 +37,7 @@ public interface BoundHashFieldExpirationOperations { * Apply {@link Expiration} to the hash without any additional constraints. * * @param expiration the expiration definition. - * @return changes to the hash fields. + * @return changes to the hash fields. {@literal null} when used in pipeline / transaction. */ default ExpireChanges expire(Expiration expiration) { return expire(expiration, Hash.FieldExpirationOptions.none()); @@ -48,7 +48,7 @@ default ExpireChanges expire(Expiration expiration) { * * @param expiration the expiration definition. * @param options expiration options. - * @return changes to the hash fields. + * @return changes to the hash fields. {@literal null} when used in pipeline / transaction. 
*/ ExpireChanges expire(Expiration expiration, Hash.FieldExpirationOptions options); @@ -56,11 +56,7 @@ default ExpireChanges expire(Expiration expiration) { * Set time to live for given {@code hashKey}. * * @param timeout the amount of time after which the key will be expired, must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is - * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time - * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition - * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / - * transaction. + * @return changes to the hash fields. {@literal null} when used in pipeline / transaction. * @throws IllegalArgumentException if the timeout is {@literal null}. * @see Redis Documentation: HEXPIRE * @since 3.5 @@ -72,11 +68,7 @@ default ExpireChanges expire(Expiration expiration) { * Set the expiration for given {@code hashKey} as a {@literal date} timestamp. * * @param expireAt must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is - * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating - * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | - * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in - * pipeline / transaction. + * @return changes to the hash fields. {@literal null} when used in pipeline / transaction. * @throws IllegalArgumentException if the instant is {@literal null} or too large to represent as a {@code Date}. 
* @see Redis Documentation: HEXPIRE * @since 3.5 @@ -87,9 +79,7 @@ default ExpireChanges expire(Expiration expiration) { /** * Remove the expiration from given {@code hashKey} . * - * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is - * removed; {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such - * field; {@literal null} when used in pipeline / transaction. + * @return changes to the hash fields. {@literal null} when used in pipeline / transaction. * @see Redis Documentation: HPERSIST * @since 3.5 */ @@ -99,10 +89,7 @@ default ExpireChanges expire(Expiration expiration) { /** * Get the time to live for {@code hashKey} in seconds. * - * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative - * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration - * time. The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / - * transaction. + * @return the actual expirations in seconds for the hash fields. {@literal null} when used in pipeline / transaction. * @see Redis Documentation: HTTL * @since 3.5 */ @@ -113,9 +100,7 @@ default ExpireChanges expire(Expiration expiration) { * Get the time to live for {@code hashKey} and convert it to the given {@link TimeUnit}. * * @param timeUnit must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative - * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration - * time. The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / + * @return the actual expirations for the hash fields in the given time unit. {@literal null} when used in pipeline / * transaction. 
* @see Redis Documentation: HTTL * @since 3.5 diff --git a/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java b/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java index 8c97c43fcd..540b351778 100644 --- a/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java @@ -29,7 +29,6 @@ import java.util.function.Function; import org.reactivestreams.Publisher; - import org.springframework.dao.InvalidDataAccessApiUsageException; import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.ReactiveHashCommands; @@ -249,13 +248,14 @@ public Mono> expire(H key, Duration timeout, Collection ha } @Override - public Mono> expire(H key, Expiration expiration, FieldExpirationOptions options, Collection hashKeys) { + public Mono> expire(H key, Expiration expiration, FieldExpirationOptions options, + Collection hashKeys) { List orderedKeys = List.copyOf(hashKeys); ByteBuffer rawKey = rawKey(key); List rawHashKeys = orderedKeys.stream().map(this::rawHashKey).toList(); - Mono> raw =createFlux(connection -> { + Mono> raw = createFlux(connection -> { return connection .applyExpiration(Mono.just(ExpireCommand.expire(rawHashKeys, expiration).from(rawKey).withOptions(options))) .map(NumericResponse::getOutput); diff --git a/src/main/java/org/springframework/data/redis/core/HashOperations.java b/src/main/java/org/springframework/data/redis/core/HashOperations.java index 67c4ce0fa8..f57143c737 100644 --- a/src/main/java/org/springframework/data/redis/core/HashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/HashOperations.java @@ -235,11 +235,7 @@ public interface HashOperations { * @param key must not be {@literal null}. * @param timeout the amount of time after which the key will be expired, must not be {@literal null}. 
* @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is - * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time - * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition - * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / - * transaction. + * @return changes to the hash fields. {@literal null} when used in pipeline / transaction. * @throws IllegalArgumentException if the timeout is {@literal null}. * @see Redis Documentation: HEXPIRE * @since 3.5 @@ -253,11 +249,7 @@ public interface HashOperations { * @param key must not be {@literal null}. * @param expireAt must not be {@literal null}. * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is - * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating - * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | - * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in - * pipeline / transaction. + * @return changes to the hash fields. {@literal null} when used in pipeline / transaction. * @throws IllegalArgumentException if the instant is {@literal null} or too large to represent as a {@code Date}. * @see Redis Documentation: HEXPIRE * @since 3.5 @@ -272,11 +264,7 @@ public interface HashOperations { * @param expiration must not be {@literal null}. * @param options must not be {@literal null}. * @param hashKeys must not be {@literal null}. 
- * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is - * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating - * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | - * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in - * pipeline / transaction. + * @return changes to the hash fields. {@literal null} when used in pipeline / transaction. * @throws IllegalArgumentException if the instant is {@literal null} or too large to represent as a {@code Date}. * @see Redis Documentation: HEXPIRE * @since 3.5 @@ -289,9 +277,7 @@ public interface HashOperations { * * @param key must not be {@literal null}. * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is - * removed; {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such - * field; {@literal null} when used in pipeline / transaction. + * @return changes to the hash fields. {@literal null} when used in pipeline / transaction. * @see Redis Documentation: HPERSIST * @since 3.5 */ @@ -303,10 +289,7 @@ public interface HashOperations { * * @param key must not be {@literal null}. * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative - * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration - * time. The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / - * transaction. + * @return the actual expirations in seconds for the hash fields. {@literal null} when used in pipeline / transaction. 
* @see Redis Documentation: HTTL * @since 3.5 */ @@ -321,10 +304,7 @@ default Expirations getTimeToLive(H key, Collection hashKeys) { * @param key must not be {@literal null}. * @param timeUnit must not be {@literal null}. * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative - * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration - * time. The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / - * transaction. + * @return the actual expirations for the hash fields. {@literal null} when used in pipeline / transaction. * @see Redis Documentation: HTTL * @since 3.5 */ diff --git a/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java b/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java index 757dbcea76..2fe3b0c65b 100644 --- a/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java @@ -238,8 +238,31 @@ default Flux> scan(H key) { */ Flux> scan(H key, ScanOptions options); + /** + * Set time to live for given {@literal hashKeys} stored within {@literal key}. + * + * @param key must not be {@literal null}. + * @param timeout the amount of time after which the key will be expired, must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a {@link Mono} emitting changes to the hash fields. + * @throws IllegalArgumentException if the timeout is {@literal null}. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ Mono> expire(H key, Duration timeout, Collection hashKeys); + /** + * Set time to live for given {@literal hashKeys} stored within {@literal key}. + * + * @param key must not be {@literal null}. + * @param expiration must not be {@literal null}. 
+ * @param options additional options to apply. + * @param hashKeys must not be {@literal null}. + * @return a {@link Mono} emitting changes to the hash fields. + * @throws IllegalArgumentException if the timeout is {@literal null}. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ Mono> expire(H key, Expiration expiration, FieldExpirationOptions options, Collection hashKeys); /** @@ -248,11 +271,7 @@ default Flux> scan(H key) { * @param key must not be {@literal null}. * @param expireAt must not be {@literal null}. * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is - * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating - * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | - * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in - * pipeline / transaction. + * @return a {@link Mono} emitting changes to the hash fields. * @throws IllegalArgumentException if the instant is {@literal null} or too large to represent as a {@code Date}. * @see Redis Documentation: HEXPIRE * @since 3.5 @@ -265,9 +284,7 @@ default Flux> scan(H key) { * * @param key must not be {@literal null}. * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is - * removed; {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such - * field; {@literal null} when used in pipeline / transaction. + * @return a {@link Mono} emitting changes to the hash fields. * @see Redis Documentation: HPERSIST * @since 3.5 */ @@ -279,10 +296,7 @@ default Flux> scan(H key) { * * @param key must not be {@literal null}. * @param hashKeys must not be {@literal null}. 
- * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative - * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration - * time. The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / - * transaction. + * @return a {@link Mono} emitting {@link Expirations} of the hash fields. * @see Redis Documentation: HTTL * @since 3.5 */ @@ -297,10 +311,7 @@ default Mono> getTimeToLive(H key, Collection hashKeys) { * @param key must not be {@literal null}. * @param timeUnit must not be {@literal null}. * @param hashKeys must not be {@literal null}. - * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative - * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration - * time. The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / - * transaction. + * @return a {@link Mono} emitting {@link Expirations} of the hash fields. * @see Redis Documentation: HTTL * @since 3.5 */ diff --git a/src/main/java/org/springframework/data/redis/core/RedisOperations.java b/src/main/java/org/springframework/data/redis/core/RedisOperations.java index def0dca04b..8c1ad67ad6 100644 --- a/src/main/java/org/springframework/data/redis/core/RedisOperations.java +++ b/src/main/java/org/springframework/data/redis/core/RedisOperations.java @@ -376,7 +376,6 @@ default Boolean expireAt(K key, Instant expireAt) { @Nullable Boolean persist(K key); - // TODO: Add TimeToLive (getTimeToLive) /** * Get the time to live for {@code key} in seconds. 
* diff --git a/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java b/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java index 94b8db7657..573f105247 100644 --- a/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java @@ -15,24 +15,48 @@ */ package org.springframework.data.redis.connection; -import static org.assertj.core.api.Assertions.*; -import static org.assertj.core.api.Assumptions.*; -import static org.awaitility.Awaitility.*; -import static org.junit.jupiter.api.condition.OS.*; -import static org.springframework.data.redis.connection.BitFieldSubCommands.*; -import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldIncrBy.Overflow.*; -import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.*; -import static org.springframework.data.redis.connection.ClusterTestVariables.*; -import static org.springframework.data.redis.connection.RedisGeoCommands.DistanceUnit.*; -import static org.springframework.data.redis.connection.RedisGeoCommands.GeoRadiusCommandArgs.*; -import static org.springframework.data.redis.connection.RedisGeoCommands.GeoSearchStoreCommandArgs.*; -import static org.springframework.data.redis.core.ScanOptions.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; +import static org.assertj.core.api.Assertions.fail; +import static org.assertj.core.api.Assertions.within; +import static org.assertj.core.api.Assumptions.assumeThat; +import static org.awaitility.Awaitility.await; +import static org.junit.jupiter.api.condition.OS.MAC; +import static 
org.springframework.data.redis.connection.BitFieldSubCommands.create; +import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldIncrBy.Overflow.FAIL; +import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.INT_8; +import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.signed; +import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.unsigned; +import static org.springframework.data.redis.connection.ClusterTestVariables.KEY_1; +import static org.springframework.data.redis.connection.ClusterTestVariables.KEY_2; +import static org.springframework.data.redis.connection.ClusterTestVariables.KEY_3; +import static org.springframework.data.redis.connection.ClusterTestVariables.VALUE_1; +import static org.springframework.data.redis.connection.ClusterTestVariables.VALUE_2; +import static org.springframework.data.redis.connection.ClusterTestVariables.VALUE_3; +import static org.springframework.data.redis.connection.ClusterTestVariables.VALUE_4; +import static org.springframework.data.redis.connection.RedisGeoCommands.DistanceUnit.KILOMETERS; +import static org.springframework.data.redis.connection.RedisGeoCommands.GeoRadiusCommandArgs.newGeoRadiusArgs; +import static org.springframework.data.redis.connection.RedisGeoCommands.GeoRadiusCommandArgs.newGeoSearchArgs; +import static org.springframework.data.redis.connection.RedisGeoCommands.GeoSearchStoreCommandArgs.newGeoSearchStoreArgs; +import static org.springframework.data.redis.core.ScanOptions.scanOptions; import java.nio.charset.StandardCharsets; import java.time.Duration; import java.time.Instant; import java.time.temporal.ChronoUnit; -import java.util.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashSet; +import java.util.List; +import 
java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.UUID; import java.util.concurrent.BlockingDeque; import java.util.concurrent.LinkedBlockingDeque; import java.util.concurrent.TimeUnit; @@ -753,7 +777,7 @@ void testExecute() { assertThat(stringSerializer.deserialize((byte[]) getResults().get(1))).isEqualTo("bar"); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") void testExecuteHashFieldExpiration() { @@ -3473,7 +3497,7 @@ void hStrLenReturnsZeroWhenKeyDoesNotExist() { verifyResults(Arrays.asList(new Object[] { 0L })); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hExpireReturnsSuccessAndSetsTTL() { @@ -3484,10 +3508,10 @@ public void hExpireReturnsSuccessAndSetsTTL() { List results = getResults(); assertThat(results.get(0)).isEqualTo(Boolean.TRUE); assertThat((List) results.get(1)).contains(1L); - assertThat((List) results.get(2)).allSatisfy( value -> assertThat((Long)value).isBetween(0L, 5L)); + assertThat((List) results.get(2)).allSatisfy(value -> assertThat((Long) value).isBetween(0L, 5L)); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hExpireReturnsMinusTwoWhenFieldDoesNotExist() { @@ -3498,7 +3522,7 @@ public void hExpireReturnsMinusTwoWhenFieldDoesNotExist() { verifyResults(Arrays.asList(Boolean.TRUE, List.of(-2L), List.of(-2L))); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hExpireReturnsTwoWhenZeroProvided() { @@ -3508,7 +3532,7 @@ public void hExpireReturnsTwoWhenZeroProvided() { verifyResults(Arrays.asList(Boolean.TRUE, List.of(2L))); } - @Test + @Test // GH-3054 @EnabledOnCommand("HPEXPIRE") public void hpExpireReturnsSuccessAndSetsTTL() { @@ -3519,10 +3543,10 @@ public void hpExpireReturnsSuccessAndSetsTTL() { List results = getResults(); assertThat(results.get(0)).isEqualTo(Boolean.TRUE); assertThat((List) results.get(1)).contains(1L); - assertThat((List) results.get(2)).allSatisfy( value -> assertThat((Long)value).isBetween(0L, 5000L)); + 
assertThat((List) results.get(2)).allSatisfy(value -> assertThat((Long) value).isBetween(0L, 5000L)); } - @Test + @Test // GH-3054 @EnabledOnCommand("HPEXPIRE") public void hpExpireReturnsMinusTwoWhenFieldDoesNotExist() { @@ -3533,7 +3557,7 @@ public void hpExpireReturnsMinusTwoWhenFieldDoesNotExist() { verifyResults(Arrays.asList(Boolean.TRUE, List.of(-2L), List.of(-2L))); } - @Test + @Test // GH-3054 @EnabledOnCommand("HPEXPIRE") public void hpExpireReturnsTwoWhenZeroProvided() { @@ -3543,7 +3567,7 @@ public void hpExpireReturnsTwoWhenZeroProvided() { verifyResults(Arrays.asList(Boolean.TRUE, List.of(2L))); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIREAT") public void hExpireAtReturnsSuccessAndSetsTTL() { @@ -3556,10 +3580,10 @@ public void hExpireAtReturnsSuccessAndSetsTTL() { List results = getResults(); assertThat(results.get(0)).isEqualTo(Boolean.TRUE); assertThat((List) results.get(1)).contains(1L); - assertThat((List) results.get(2)).allSatisfy( value -> assertThat((Long)value).isBetween(0L, 5L)); + assertThat((List) results.get(2)).allSatisfy(value -> assertThat((Long) value).isBetween(0L, 5L)); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIREAT") public void hExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { @@ -3572,7 +3596,7 @@ public void hExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { verifyResults(Arrays.asList(Boolean.TRUE, List.of(-2L), List.of(-2L))); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIREAT") public void hExpireAtReturnsTwoWhenZeroProvided() { @@ -3584,7 +3608,7 @@ public void hExpireAtReturnsTwoWhenZeroProvided() { verifyResults(Arrays.asList(Boolean.TRUE, List.of(2L))); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIREAT") public void hpExpireAtReturnsSuccessAndSetsTTL() { @@ -3597,10 +3621,10 @@ public void hpExpireAtReturnsSuccessAndSetsTTL() { List results = getResults(); assertThat(results.get(0)).isEqualTo(Boolean.TRUE); assertThat((List) results.get(1)).contains(1L); - assertThat((List) 
results.get(2)).allSatisfy( value -> assertThat((Long)value).isBetween(0L, 5L)); + assertThat((List) results.get(2)).allSatisfy(value -> assertThat((Long) value).isBetween(0L, 5L)); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIREAT") public void hpExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { @@ -3613,7 +3637,7 @@ public void hpExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { verifyResults(Arrays.asList(Boolean.TRUE, List.of(-2L), List.of(-2L))); } - @Test + @Test // GH-3054 @EnabledOnCommand("HPEXPIREAT") public void hpExpireAdReturnsTwoWhenZeroProvided() { @@ -3625,7 +3649,7 @@ public void hpExpireAdReturnsTwoWhenZeroProvided() { verifyResults(Arrays.asList(Boolean.TRUE, List.of(2L))); } - @Test + @Test // GH-3054 @EnabledOnCommand("HPERSIST") public void hPersistReturnsSuccessAndPersistsField() { @@ -3637,7 +3661,7 @@ public void hPersistReturnsSuccessAndPersistsField() { verifyResults(Arrays.asList(Boolean.TRUE, List.of(1L), List.of(1L), List.of(-1L))); } - @Test + @Test // GH-3054 @EnabledOnCommand("HPERSIST") public void hPersistReturnsMinusOneWhenFieldDoesNotHaveExpiration() { @@ -3647,7 +3671,7 @@ public void hPersistReturnsMinusOneWhenFieldDoesNotHaveExpiration() { verifyResults(Arrays.asList(Boolean.TRUE, List.of(-1L))); } - @Test + @Test // GH-3054 @EnabledOnCommand("HPERSIST") public void hPersistReturnsMinusTwoWhenFieldOrKeyMissing() { @@ -3658,7 +3682,7 @@ public void hPersistReturnsMinusTwoWhenFieldOrKeyMissing() { verifyResults(Arrays.asList(Boolean.TRUE, List.of(-2L), List.of(-2L))); } - @Test + @Test // GH-3054 @EnabledOnCommand("HTTL") public void hTtlReturnsMinusOneWhenFieldHasNoExpiration() { @@ -3668,7 +3692,7 @@ public void hTtlReturnsMinusOneWhenFieldHasNoExpiration() { verifyResults(Arrays.asList(Boolean.TRUE, List.of(-1L))); } - @Test + @Test // GH-3054 @EnabledOnCommand("HTTL") public void hTtlReturnsMinusIndependendOfTimeUnitOneWhenFieldHasNoExpiration() { @@ -3678,7 +3702,7 @@ public void 
hTtlReturnsMinusIndependendOfTimeUnitOneWhenFieldHasNoExpiration() { verifyResults(Arrays.asList(Boolean.TRUE, List.of(-1L))); } - @Test + @Test // GH-3054 @EnabledOnCommand("HTTL") public void hTtlReturnsMinusTwoWhenFieldOrKeyMissing() { diff --git a/src/test/java/org/springframework/data/redis/connection/jedis/JedisClusterConnectionTests.java b/src/test/java/org/springframework/data/redis/connection/jedis/JedisClusterConnectionTests.java index 4e41e60954..4bed88c0a5 100644 --- a/src/test/java/org/springframework/data/redis/connection/jedis/JedisClusterConnectionTests.java +++ b/src/test/java/org/springframework/data/redis/connection/jedis/JedisClusterConnectionTests.java @@ -15,17 +15,37 @@ */ package org.springframework.data.redis.connection.jedis; -import static org.assertj.core.api.Assertions.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; +import static org.assertj.core.api.Assertions.fail; import static org.assertj.core.data.Offset.offset; -import static org.springframework.data.redis.connection.BitFieldSubCommands.*; -import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldIncrBy.Overflow.*; -import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.*; -import static org.springframework.data.redis.connection.ClusterTestVariables.*; -import static org.springframework.data.redis.connection.RedisGeoCommands.DistanceUnit.*; -import static org.springframework.data.redis.connection.RedisGeoCommands.GeoRadiusCommandArgs.*; -import static org.springframework.data.redis.connection.RedisListCommands.*; -import static org.springframework.data.redis.connection.RedisZSetCommands.*; -import static org.springframework.data.redis.core.ScanOptions.*; +import static org.springframework.data.redis.connection.BitFieldSubCommands.create; +import static 
org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldIncrBy.Overflow.FAIL; +import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.INT_8; +import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.signed; +import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.unsigned; +import static org.springframework.data.redis.connection.ClusterTestVariables.CLUSTER_HOST; +import static org.springframework.data.redis.connection.ClusterTestVariables.KEY_1; +import static org.springframework.data.redis.connection.ClusterTestVariables.KEY_2; +import static org.springframework.data.redis.connection.ClusterTestVariables.KEY_3; +import static org.springframework.data.redis.connection.ClusterTestVariables.MASTER_NODE_1_PORT; +import static org.springframework.data.redis.connection.ClusterTestVariables.MASTER_NODE_2_PORT; +import static org.springframework.data.redis.connection.ClusterTestVariables.MASTER_NODE_3_PORT; +import static org.springframework.data.redis.connection.ClusterTestVariables.REPLICAOF_NODE_1_PORT; +import static org.springframework.data.redis.connection.ClusterTestVariables.SAME_SLOT_KEY_1; +import static org.springframework.data.redis.connection.ClusterTestVariables.SAME_SLOT_KEY_2; +import static org.springframework.data.redis.connection.ClusterTestVariables.SAME_SLOT_KEY_3; +import static org.springframework.data.redis.connection.ClusterTestVariables.VALUE_1; +import static org.springframework.data.redis.connection.ClusterTestVariables.VALUE_2; +import static org.springframework.data.redis.connection.ClusterTestVariables.VALUE_3; +import static org.springframework.data.redis.connection.RedisGeoCommands.DistanceUnit.KILOMETERS; +import static org.springframework.data.redis.connection.RedisGeoCommands.GeoRadiusCommandArgs.newGeoRadiusArgs; +import static org.springframework.data.redis.connection.RedisListCommands.Direction; +import static 
org.springframework.data.redis.connection.RedisListCommands.Position; +import static org.springframework.data.redis.connection.RedisZSetCommands.Range; +import static org.springframework.data.redis.core.ScanOptions.NONE; +import static org.springframework.data.redis.core.ScanOptions.scanOptions; import redis.clients.jedis.ConnectionPool; import redis.clients.jedis.HostAndPort; @@ -37,14 +57,23 @@ import java.nio.charset.StandardCharsets; import java.time.Duration; import java.time.Instant; -import java.util.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; import java.util.concurrent.TimeUnit; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.extension.ExtendWith; - import org.springframework.dao.DataAccessException; import org.springframework.dao.InvalidDataAccessApiUsageException; import org.springframework.data.domain.Range.Bound; @@ -1040,9 +1069,10 @@ public void hStrLenReturnsZeroWhenKeyDoesNotExist() { assertThat(clusterConnection.hashCommands().hStrLen(KEY_1_BYTES, KEY_1_BYTES)).isEqualTo(0L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hExpireReturnsSuccessAndSetsTTL() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_2_BYTES)).contains(1L); @@ -1050,9 +1080,10 @@ public void hExpireReturnsSuccessAndSetsTTL() { .allSatisfy(val -> assertThat(val).isBetween(0L, 5L)); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hExpireReturnsMinusTwoWhenFieldDoesNotExist() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); // missing field assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, 
KEY_1_BYTES)).contains(-2L); @@ -1060,27 +1091,30 @@ public void hExpireReturnsMinusTwoWhenFieldDoesNotExist() { assertThat(clusterConnection.hashCommands().hExpire(KEY_2_BYTES, 5L, KEY_2_BYTES)).contains(-2L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hExpireReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hpExpireReturnsSuccessAndSetsTTL() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 5000L, KEY_2_BYTES)).contains(1L); - assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, TimeUnit.MILLISECONDS,KEY_2_BYTES)) + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, TimeUnit.MILLISECONDS, KEY_2_BYTES)) .allSatisfy(val -> assertThat(val).isBetween(0L, 5000L)); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hpExpireReturnsMinusTwoWhenFieldDoesNotExist() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); // missing field assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 5L, KEY_1_BYTES)).contains(-2L); @@ -1088,26 +1122,31 @@ public void hpExpireReturnsMinusTwoWhenFieldDoesNotExist() { assertThat(clusterConnection.hashCommands().hpExpire(KEY_2_BYTES, 5L, KEY_2_BYTES)).contains(-2L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hpExpireReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hExpireAtReturnsSuccessAndSetsTTL() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, 
inFiveSeconds, KEY_2_BYTES)).contains(1L); - assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_2_BYTES)).allSatisfy(val -> assertThat(val).isBetween(0L, 5L)); + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_2_BYTES)) + .allSatisfy(val -> assertThat(val).isBetween(0L, 5L)); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); @@ -1117,27 +1156,31 @@ public void hExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { assertThat(clusterConnection.hashCommands().hExpireAt(KEY_2_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(-2L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hExpireAdReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hpExpireAtReturnsSuccessAndSetsTTL() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(1L); assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, TimeUnit.MILLISECONDS, KEY_2_BYTES)) .allSatisfy(val -> assertThat(val).isBetween(0L, 5000L)); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hpExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); @@ -1147,55 +1190,60 @@ public void hpExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_2_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(-2L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void 
hpExpireAdReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hPersistReturnsSuccessAndPersistsField() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_2_BYTES)).contains(1L); - assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_2_BYTES)).contains(1L); assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hPersistReturnsMinusOneWhenFieldDoesNotHaveExpiration() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hPersistReturnsMinusTwoWhenFieldOrKeyMissing() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_1_BYTES)).contains(-2L); - assertThat(clusterConnection.hashCommands().hPersist(KEY_3_BYTES,KEY_2_BYTES)).contains(-2L); + assertThat(clusterConnection.hashCommands().hPersist(KEY_3_BYTES, KEY_2_BYTES)).contains(-2L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hTtlReturnsMinusOneWhenFieldHasNoExpiration() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hTtlReturnsMinusTwoWhenFieldOrKeyMissing() { assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_1_BYTES)).contains(-2L); - 
assertThat(clusterConnection.hashCommands().hTtl(KEY_3_BYTES,KEY_2_BYTES)).contains(-2L); - + assertThat(clusterConnection.hashCommands().hTtl(KEY_3_BYTES, KEY_2_BYTES)).contains(-2L); } @Test // DATAREDIS-315 @@ -1346,7 +1394,7 @@ public void blMoveShouldMoveElementsCorrectly() { .isEqualTo(VALUE_2_BYTES); assertThat( clusterConnection.bLMove(SAME_SLOT_KEY_1_BYTES, SAME_SLOT_KEY_2_BYTES, Direction.RIGHT, Direction.LEFT, 0.01)) - .isNull(); + .isNull(); assertThat(nativeConnection.lrange(SAME_SLOT_KEY_1, 0, -1)).isEmpty(); assertThat(nativeConnection.lrange(SAME_SLOT_KEY_2, 0, -1)).containsExactly(VALUE_2, VALUE_3); @@ -2946,13 +2994,13 @@ void bitFieldIncrByWithOverflowShouldWorkCorrectly() { assertThat(clusterConnection.stringCommands().bitField(JedisConverters.toBytes(KEY_1), create().incr(unsigned(2)).valueAt(BitFieldSubCommands.Offset.offset(102L)).overflow(FAIL).by(1L))) - .containsExactly(1L); + .containsExactly(1L); assertThat(clusterConnection.stringCommands().bitField(JedisConverters.toBytes(KEY_1), create().incr(unsigned(2)).valueAt(BitFieldSubCommands.Offset.offset(102L)).overflow(FAIL).by(1L))) - .containsExactly(2L); + .containsExactly(2L); assertThat(clusterConnection.stringCommands().bitField(JedisConverters.toBytes(KEY_1), create().incr(unsigned(2)).valueAt(BitFieldSubCommands.Offset.offset(102L)).overflow(FAIL).by(1L))) - .containsExactly(3L); + .containsExactly(3L); assertThat(clusterConnection.stringCommands() .bitField(JedisConverters.toBytes(KEY_1), create().incr(unsigned(2)).valueAt(BitFieldSubCommands.Offset.offset(102L)).overflow(FAIL).by(1L)) @@ -2964,7 +3012,7 @@ void bitfieldShouldAllowMultipleSubcommands() { assertThat(clusterConnection.stringCommands().bitField(JedisConverters.toBytes(KEY_1), create().incr(signed(5)).valueAt(BitFieldSubCommands.Offset.offset(100L)).by(1L).get(unsigned(4)).valueAt(0L))) - .containsExactly(1L, 0L); + .containsExactly(1L, 0L); } @Test // DATAREDIS-562 @@ -2974,13 +3022,13 @@ void 
bitfieldShouldWorkUsingNonZeroBasedOffset() { clusterConnection.stringCommands().bitField(JedisConverters.toBytes(KEY_1), create().set(INT_8).valueAt(BitFieldSubCommands.Offset.offset(0L).multipliedByTypeLength()).to(100L) .set(INT_8).valueAt(BitFieldSubCommands.Offset.offset(1L).multipliedByTypeLength()).to(200L))) - .containsExactly(0L, 0L); + .containsExactly(0L, 0L); assertThat( clusterConnection.stringCommands() .bitField(JedisConverters.toBytes(KEY_1), create().get(INT_8).valueAt(BitFieldSubCommands.Offset.offset(0L).multipliedByTypeLength()).get(INT_8) - .valueAt(BitFieldSubCommands.Offset.offset(1L).multipliedByTypeLength()))).containsExactly(100L, - -56L); + .valueAt(BitFieldSubCommands.Offset.offset(1L).multipliedByTypeLength()))) + .containsExactly(100L, -56L); } @Test // DATAREDIS-1005 @@ -3125,7 +3173,8 @@ void shouldUseCachedTopology() { assertThat(topology).isInstanceOf(JedisClusterConnection.JedisClusterTopology.class); assertThat(provider.shouldUseCachedValue(null)).isFalse(); - assertThat(provider.shouldUseCachedValue(new JedisClusterConnection.JedisClusterTopology(Set.of(), System.currentTimeMillis() - 101, 100))).isFalse(); + assertThat(provider.shouldUseCachedValue( + new JedisClusterConnection.JedisClusterTopology(Set.of(), System.currentTimeMillis() - 101, 100))).isFalse(); assertThat(provider.shouldUseCachedValue( new JedisClusterConnection.JedisClusterTopology(Set.of(), System.currentTimeMillis() + 100, 100))).isTrue(); } diff --git a/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java b/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java index 5611fb351f..1d45dc739e 100644 --- a/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java +++ b/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java @@ -15,16 +15,35 @@ */ package 
org.springframework.data.redis.connection.lettuce; -import static org.assertj.core.api.Assertions.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; import static org.assertj.core.data.Offset.offset; -import static org.springframework.data.redis.connection.BitFieldSubCommands.*; -import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldIncrBy.Overflow.*; -import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.*; -import static org.springframework.data.redis.connection.ClusterTestVariables.*; -import static org.springframework.data.redis.connection.RedisGeoCommands.DistanceUnit.*; -import static org.springframework.data.redis.connection.RedisGeoCommands.GeoRadiusCommandArgs.*; -import static org.springframework.data.redis.connection.RedisZSetCommands.*; -import static org.springframework.data.redis.core.ScanOptions.*; +import static org.springframework.data.redis.connection.BitFieldSubCommands.create; +import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldIncrBy.Overflow.FAIL; +import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.INT_8; +import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.signed; +import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.unsigned; +import static org.springframework.data.redis.connection.ClusterTestVariables.CLUSTER_HOST; +import static org.springframework.data.redis.connection.ClusterTestVariables.KEY_1; +import static org.springframework.data.redis.connection.ClusterTestVariables.KEY_2; +import static org.springframework.data.redis.connection.ClusterTestVariables.KEY_3; +import static org.springframework.data.redis.connection.ClusterTestVariables.KEY_4; +import static 
org.springframework.data.redis.connection.ClusterTestVariables.MASTER_NODE_1_PORT; +import static org.springframework.data.redis.connection.ClusterTestVariables.MASTER_NODE_2_PORT; +import static org.springframework.data.redis.connection.ClusterTestVariables.MASTER_NODE_3_PORT; +import static org.springframework.data.redis.connection.ClusterTestVariables.REPLICAOF_NODE_1_PORT; +import static org.springframework.data.redis.connection.ClusterTestVariables.SAME_SLOT_KEY_1; +import static org.springframework.data.redis.connection.ClusterTestVariables.SAME_SLOT_KEY_2; +import static org.springframework.data.redis.connection.ClusterTestVariables.SAME_SLOT_KEY_3; +import static org.springframework.data.redis.connection.ClusterTestVariables.VALUE_1; +import static org.springframework.data.redis.connection.ClusterTestVariables.VALUE_2; +import static org.springframework.data.redis.connection.ClusterTestVariables.VALUE_3; +import static org.springframework.data.redis.connection.ClusterTestVariables.VALUE_4; +import static org.springframework.data.redis.connection.RedisGeoCommands.DistanceUnit.KILOMETERS; +import static org.springframework.data.redis.connection.RedisGeoCommands.GeoRadiusCommandArgs.newGeoRadiusArgs; +import static org.springframework.data.redis.connection.RedisZSetCommands.Range; +import static org.springframework.data.redis.core.ScanOptions.scanOptions; import io.lettuce.core.cluster.RedisClusterClient; import io.lettuce.core.cluster.api.sync.RedisAdvancedClusterCommands; @@ -33,7 +52,17 @@ import java.nio.charset.StandardCharsets; import java.time.Duration; import java.time.Instant; -import java.util.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; import java.util.concurrent.TimeUnit; import 
org.assertj.core.data.Offset; @@ -49,11 +78,21 @@ import org.springframework.data.geo.Distance; import org.springframework.data.geo.GeoResults; import org.springframework.data.geo.Point; -import org.springframework.data.redis.connection.*; +import org.springframework.data.redis.connection.BitFieldSubCommands; +import org.springframework.data.redis.connection.ClusterConnectionTests; +import org.springframework.data.redis.connection.ClusterSlotHashUtil; +import org.springframework.data.redis.connection.ClusterTestVariables; +import org.springframework.data.redis.connection.DataType; +import org.springframework.data.redis.connection.DefaultSortParameters; import org.springframework.data.redis.connection.Limit; +import org.springframework.data.redis.connection.RedisClusterConfiguration; +import org.springframework.data.redis.connection.RedisClusterConnection; +import org.springframework.data.redis.connection.RedisClusterNode; import org.springframework.data.redis.connection.RedisClusterNode.SlotRange; import org.springframework.data.redis.connection.RedisGeoCommands.GeoLocation; +import org.springframework.data.redis.connection.RedisListCommands; import org.springframework.data.redis.connection.RedisListCommands.Position; +import org.springframework.data.redis.connection.RedisNode; import org.springframework.data.redis.connection.RedisServerCommands.FlushOption; import org.springframework.data.redis.connection.RedisStringCommands.BitOperation; import org.springframework.data.redis.connection.RedisStringCommands.SetOption; @@ -1097,7 +1136,7 @@ public void hStrLenReturnsZeroWhenKeyDoesNotExist() { assertThat(clusterConnection.hashCommands().hStrLen(KEY_1_BYTES, KEY_1_BYTES)).isEqualTo(0L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hExpireReturnsSuccessAndSetsTTL() { @@ -1107,7 +1146,7 @@ public void hExpireReturnsSuccessAndSetsTTL() { assertThat(clusterConnection.hTtl(KEY_1_BYTES, KEY_2_BYTES)).allSatisfy(val -> assertThat(val).isBetween(0L, 
5L)); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hExpireReturnsMinusTwoWhenFieldDoesNotExist() { @@ -1118,17 +1157,19 @@ public void hExpireReturnsMinusTwoWhenFieldDoesNotExist() { assertThat(clusterConnection.hashCommands().hExpire(KEY_2_BYTES, 5L, KEY_2_BYTES)).contains(-2L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hExpireReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hpExpireReturnsSuccessAndSetsTTL() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 5000L, KEY_2_BYTES)).contains(1L); @@ -1136,7 +1177,7 @@ public void hpExpireReturnsSuccessAndSetsTTL() { .allSatisfy(val -> assertThat(val).isBetween(0L, 5000L)); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hpExpireReturnsMinusTwoWhenFieldDoesNotExist() { @@ -1147,7 +1188,7 @@ public void hpExpireReturnsMinusTwoWhenFieldDoesNotExist() { assertThat(clusterConnection.hashCommands().hpExpire(KEY_2_BYTES, 5L, KEY_2_BYTES)).contains(-2L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hpExpireReturnsTwoWhenZeroProvided() { @@ -1156,17 +1197,18 @@ public void hpExpireReturnsTwoWhenZeroProvided() { assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hExpireAtReturnsSuccessAndSetsTTL() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(1L); assertThat(clusterConnection.hTtl(KEY_1_BYTES, KEY_2_BYTES)).allSatisfy(val -> assertThat(val).isBetween(0L, 5L)); } - @Test + @Test // 
GH-3054 @EnabledOnCommand("HEXPIRE") public void hExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { @@ -1180,9 +1222,10 @@ public void hExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { assertThat(clusterConnection.hashCommands().hExpireAt(KEY_2_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(-2L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hExpireAdReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); @@ -1194,12 +1237,13 @@ public void hpExpireAtReturnsSuccessAndSetsTTL() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(1L); assertThat(clusterConnection.hpTtl(KEY_1_BYTES, KEY_2_BYTES)) .allSatisfy(val -> assertThat(val).isGreaterThan(1000L).isLessThanOrEqualTo(5000L)); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hpExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { @@ -1212,7 +1256,7 @@ public void hpExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_2_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(-2L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hpExpireAdReturnsTwoWhenZeroProvided() { @@ -1221,17 +1265,17 @@ public void hpExpireAdReturnsTwoWhenZeroProvided() { assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hPersistReturnsSuccessAndPersistsField() { nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_2_BYTES)).contains(1L); - assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_2_BYTES)).contains(1L); + 
assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_2_BYTES)).contains(1L); assertThat(clusterConnection.hTtl(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hPersistReturnsMinusOneWhenFieldDoesNotHaveExpiration() { @@ -1239,17 +1283,18 @@ public void hPersistReturnsMinusOneWhenFieldDoesNotHaveExpiration() { assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hPersistReturnsMinusTwoWhenFieldOrKeyMissing() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_1_BYTES)).contains(-2L); - assertThat(clusterConnection.hashCommands().hPersist(KEY_3_BYTES,KEY_2_BYTES)).contains(-2L); + assertThat(clusterConnection.hashCommands().hPersist(KEY_3_BYTES, KEY_2_BYTES)).contains(-2L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hTtlReturnsMinusOneWhenFieldHasNoExpiration() { @@ -1259,12 +1304,12 @@ public void hTtlReturnsMinusOneWhenFieldHasNoExpiration() { assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, TimeUnit.HOURS, KEY_2_BYTES)).contains(-1L); } - @Test + @Test // GH-3054 @EnabledOnCommand("HEXPIRE") public void hTtlReturnsMinusTwoWhenFieldOrKeyMissing() { assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_1_BYTES)).contains(-2L); - assertThat(clusterConnection.hashCommands().hTtl(KEY_3_BYTES,KEY_2_BYTES)).contains(-2L); + assertThat(clusterConnection.hashCommands().hTtl(KEY_3_BYTES, KEY_2_BYTES)).contains(-2L); } @Test // DATAREDIS-315 diff --git a/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommandsIntegrationTests.java b/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommandsIntegrationTests.java index 4ef5fcffe3..bc1f8cc204 100644 --- 
a/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommandsIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommandsIntegrationTests.java @@ -15,9 +15,8 @@ */ package org.springframework.data.redis.connection.lettuce; -import static org.assertj.core.api.Assertions.*; +import static org.assertj.core.api.Assertions.assertThat; -import org.springframework.data.redis.test.condition.EnabledOnCommand; import reactor.test.StepVerifier; import java.nio.ByteBuffer; @@ -31,6 +30,7 @@ import java.util.Map; import org.springframework.data.redis.core.ScanOptions; +import org.springframework.data.redis.test.condition.EnabledOnCommand; import org.springframework.data.redis.test.extension.parametrized.ParameterizedRedisTest; /** @@ -107,8 +107,7 @@ void hMGetShouldReturnValueForFields() { nativeCommands.hset(KEY_1, FIELD_3, VALUE_3); connection.hashCommands().hMGet(KEY_1_BBUFFER, Arrays.asList(FIELD_1_BBUFFER, FIELD_3_BBUFFER)) - .as(StepVerifier::create) - .consumeNextWith(actual -> { + .as(StepVerifier::create).consumeNextWith(actual -> { assertThat(actual).contains(VALUE_1_BBUFFER, VALUE_3_BBUFFER); @@ -124,13 +123,11 @@ void hMGetShouldReturnNullValueForFieldsThatHaveNoValue() { connection.hashCommands().hMGet(KEY_1_BBUFFER, Collections.singletonList(FIELD_1_BBUFFER)).as(StepVerifier::create) .expectNext(Collections.singletonList(VALUE_1_BBUFFER)).verifyComplete(); - connection.hashCommands().hMGet(KEY_1_BBUFFER, Collections.singletonList(FIELD_2_BBUFFER)) - .as(StepVerifier::create) + connection.hashCommands().hMGet(KEY_1_BBUFFER, Collections.singletonList(FIELD_2_BBUFFER)).as(StepVerifier::create) .expectNext(Collections.singletonList(null)).verifyComplete(); connection.hashCommands().hMGet(KEY_1_BBUFFER, Arrays.asList(FIELD_1_BBUFFER, FIELD_2_BBUFFER, FIELD_3_BBUFFER)) - .as(StepVerifier::create) - .expectNext(Arrays.asList(VALUE_1_BBUFFER, null, 
VALUE_3_BBUFFER)).verifyComplete(); + .as(StepVerifier::create).expectNext(Arrays.asList(VALUE_1_BBUFFER, null, VALUE_3_BBUFFER)).verifyComplete(); } @ParameterizedRedisTest // DATAREDIS-525 @@ -197,8 +194,7 @@ void hDelShouldRemoveMultipleFieldsCorrectly() { nativeCommands.hset(KEY_1, FIELD_3, VALUE_3); connection.hashCommands().hDel(KEY_1_BBUFFER, Arrays.asList(FIELD_1_BBUFFER, FIELD_3_BBUFFER)) - .as(StepVerifier::create) - .expectNext(2L).verifyComplete(); + .as(StepVerifier::create).expectNext(2L).verifyComplete(); } @ParameterizedRedisTest // DATAREDIS-525 @@ -293,62 +289,50 @@ void hStrLenReturnsZeroWhenKeyDoesNotExist() { .verifyComplete(); } - @ParameterizedRedisTest + @ParameterizedRedisTest // GH-3054 @EnabledOnCommand("HEXPIRE") void hExpireShouldHandleMultipleParametersCorrectly() { + assertThat(nativeCommands.hset(KEY_1, FIELD_1, VALUE_1)).isTrue(); assertThat(nativeCommands.hset(KEY_1, FIELD_2, VALUE_2)).isTrue(); final var fields = Arrays.asList(FIELD_1_BBUFFER, FIELD_2_BBUFFER, FIELD_3_BBUFFER); connection.hashCommands().hExpire(KEY_1_BBUFFER, Duration.ofSeconds(1), fields).as(StepVerifier::create) // - .expectNext(1L) - .expectNext(1L) - .expectNext(-2L) - .expectComplete() - .verify(); + .expectNext(1L).expectNext(1L).expectNext(-2L).expectComplete().verify(); assertThat(nativeCommands.httl(KEY_1, FIELD_1)).allSatisfy(it -> assertThat(it).isBetween(0L, 1000L)); assertThat(nativeCommands.httl(KEY_1, FIELD_2)).allSatisfy(it -> assertThat(it).isBetween(0L, 1000L)); assertThat(nativeCommands.httl(KEY_1, FIELD_3)).allSatisfy(it -> assertThat(it).isEqualTo(-2L)); - } - @ParameterizedRedisTest + @ParameterizedRedisTest // GH-3054 @EnabledOnCommand("HEXPIRE") void hExpireAtShouldHandleMultipleParametersCorrectly() { + assertThat(nativeCommands.hset(KEY_1, FIELD_1, VALUE_1)).isTrue(); assertThat(nativeCommands.hset(KEY_1, FIELD_2, VALUE_2)).isTrue(); final var fields = Arrays.asList(FIELD_1_BBUFFER, FIELD_2_BBUFFER, FIELD_3_BBUFFER); 
connection.hashCommands().hExpireAt(KEY_1_BBUFFER, Instant.now().plusSeconds(1), fields).as(StepVerifier::create) // - .expectNext(1L) - .expectNext(1L) - .expectNext(-2L) - .expectComplete() - .verify(); + .expectNext(1L).expectNext(1L).expectNext(-2L).expectComplete().verify(); assertThat(nativeCommands.httl(KEY_1, FIELD_1, FIELD_2)).allSatisfy(it -> assertThat(it).isBetween(0L, 1000L)); assertThat(nativeCommands.httl(KEY_1, FIELD_3)).allSatisfy(it -> assertThat(it).isEqualTo(-2L)); - } - @ParameterizedRedisTest + @ParameterizedRedisTest // GH-3054 @EnabledOnCommand("HEXPIRE") void hPersistShouldPersistFields() { + assertThat(nativeCommands.hset(KEY_1, FIELD_1, VALUE_1)).isTrue(); assertThat(nativeCommands.hset(KEY_1, FIELD_2, VALUE_2)).isTrue(); - assertThat(nativeCommands.hexpire(KEY_1, 1000, FIELD_1)) - .allSatisfy(it -> assertThat(it).isEqualTo(1L)); + assertThat(nativeCommands.hexpire(KEY_1, 1000, FIELD_1)).allSatisfy(it -> assertThat(it).isEqualTo(1L)); final var fields = Arrays.asList(FIELD_1_BBUFFER, FIELD_2_BBUFFER, FIELD_3_BBUFFER); connection.hashCommands().hPersist(KEY_1_BBUFFER, fields).as(StepVerifier::create) // - .expectNext(1L) - .expectNext(-1L) - .expectNext(-2L) - .expectComplete() - .verify(); + .expectNext(1L).expectNext(-1L).expectNext(-2L).expectComplete().verify(); assertThat(nativeCommands.httl(KEY_1, FIELD_1, FIELD_2)).allSatisfy(it -> assertThat(it).isEqualTo(-1L)); } diff --git a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java index bbb84a76de..6499ae325a 100644 --- a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java @@ -15,8 +15,9 @@ */ package org.springframework.data.redis.core; -import static org.assertj.core.api.Assertions.*; -import static 
org.assertj.core.api.Assumptions.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assumptions.assumeThat; import java.io.IOException; import java.time.Duration; @@ -29,7 +30,6 @@ import org.assertj.core.api.InstanceOfAssertFactories; import org.junit.jupiter.api.BeforeEach; - import org.springframework.data.redis.ObjectFactory; import org.springframework.data.redis.RawObjectFactory; import org.springframework.data.redis.StringObjectFactory; @@ -212,7 +212,7 @@ void randomValue() { assertThat(values).hasSize(2).containsEntry(key1, val1).containsEntry(key2, val2); } - @EnabledOnCommand("HEXPIRE") + @EnabledOnCommand("HEXPIRE") // GH-3054 @ParameterizedRedisTest void testExpireAndGetExpireMillis() { @@ -237,7 +237,7 @@ void testExpireAndGetExpireMillis() { }); } - @ParameterizedRedisTest + @ParameterizedRedisTest // GH-3054 @EnabledOnCommand("HEXPIRE") void testExpireAndGetExpireSeconds() { @@ -268,7 +268,7 @@ void testExpireAndGetExpireSeconds() { }); } - @ParameterizedRedisTest + @ParameterizedRedisTest // GH-3054 @EnabledOnCommand("HEXPIRE") void testBoundExpireAndGetExpireSeconds() { @@ -300,7 +300,7 @@ void testBoundExpireAndGetExpireSeconds() { }); } - @ParameterizedRedisTest + @ParameterizedRedisTest // GH-3054 @EnabledOnCommand("HEXPIRE") void testExpireAtAndGetExpireMillis() { @@ -325,7 +325,7 @@ void testExpireAtAndGetExpireMillis() { }); } - @ParameterizedRedisTest + @ParameterizedRedisTest // GH-3054 @EnabledOnCommand("HEXPIRE") void expireThrowsErrorOfNanoPrecision() { @@ -336,7 +336,7 @@ void expireThrowsErrorOfNanoPrecision() { .isThrownBy(() -> redisTemplate.opsForHash().getTimeToLive(key, TimeUnit.NANOSECONDS, List.of(key1))); } - @ParameterizedRedisTest + @ParameterizedRedisTest // GH-3054 @EnabledOnCommand("HEXPIRE") void testExpireWithOptionsNone() { @@ -349,12 +349,13 @@ void testExpireWithOptionsNone() { hashOps.put(key, 
key1, val1); hashOps.put(key, key2, val2); - ExpireChanges expire = redisTemplate.opsForHash().expire(key, org.springframework.data.redis.core.types.Expiration.seconds(20), FieldExpirationOptions.none(), List.of(key1)); + ExpireChanges expire = redisTemplate.opsForHash().expire(key, + org.springframework.data.redis.core.types.Expiration.seconds(20), FieldExpirationOptions.none(), List.of(key1)); assertThat(expire.allOk()).isTrue(); } - @ParameterizedRedisTest + @ParameterizedRedisTest // GH-3054 @EnabledOnCommand("HEXPIRE") void testExpireWithOptions() { @@ -367,16 +368,20 @@ void testExpireWithOptions() { hashOps.put(key, key1, val1); hashOps.put(key, key2, val2); - redisTemplate.opsForHash().expire(key, org.springframework.data.redis.core.types.Expiration.seconds(20), FieldExpirationOptions.none(), List.of(key1)); - redisTemplate.opsForHash().expire(key, org.springframework.data.redis.core.types.Expiration.seconds(60), FieldExpirationOptions.none(), List.of(key2)); + redisTemplate.opsForHash().expire(key, org.springframework.data.redis.core.types.Expiration.seconds(20), + FieldExpirationOptions.none(), List.of(key1)); + redisTemplate.opsForHash().expire(key, org.springframework.data.redis.core.types.Expiration.seconds(60), + FieldExpirationOptions.none(), List.of(key2)); - ExpireChanges changes = redisTemplate.opsForHash().expire(key, org.springframework.data.redis.core.types.Expiration.seconds(30), FieldExpirationOptions.builder().gt().build(), List.of(key1, key2)); + ExpireChanges changes = redisTemplate.opsForHash().expire(key, + org.springframework.data.redis.core.types.Expiration.seconds(30), FieldExpirationOptions.builder().gt().build(), + List.of(key1, key2)); assertThat(changes.ok()).containsExactly(key1); assertThat(changes.skipped()).containsExactly(key2); } - @ParameterizedRedisTest + @ParameterizedRedisTest // GH-3054 @EnabledOnCommand("HEXPIRE") void testPersistAndGetExpireMillis() { diff --git 
a/src/test/java/org/springframework/data/redis/core/DefaultReactiveHashOperationsIntegrationTests.java b/src/test/java/org/springframework/data/redis/core/DefaultReactiveHashOperationsIntegrationTests.java index 2d574ee123..48532b2feb 100644 --- a/src/test/java/org/springframework/data/redis/core/DefaultReactiveHashOperationsIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/core/DefaultReactiveHashOperationsIntegrationTests.java @@ -15,9 +15,9 @@ */ package org.springframework.data.redis.core; -import static org.assertj.core.api.Assertions.*; -import static org.assertj.core.api.Assumptions.*; -import static org.junit.jupiter.api.condition.OS.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assumptions.assumeThat; +import static org.junit.jupiter.api.condition.OS.MAC; import reactor.test.StepVerifier; @@ -33,7 +33,6 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.condition.DisabledOnOs; - import org.springframework.data.redis.ObjectFactory; import org.springframework.data.redis.RawObjectFactory; import org.springframework.data.redis.SettingsUtils; @@ -506,7 +505,7 @@ void scan() { .verifyComplete(); } - @EnabledOnCommand("HEXPIRE") + @EnabledOnCommand("HEXPIRE") // GH-3054 @ParameterizedRedisTest void testExpireAndGetExpireMillis() { @@ -531,7 +530,7 @@ void testExpireAndGetExpireMillis() { }).verifyComplete(); } - @ParameterizedRedisTest + @ParameterizedRedisTest // GH-3054 @EnabledOnCommand("HEXPIRE") void testExpireWithOptions() { @@ -543,23 +542,32 @@ void testExpireWithOptions() { putAll(key, key1, val1, key2, val2); - hashOperations.expire(key, org.springframework.data.redis.core.types.Expiration.seconds(20), FieldExpirationOptions.none(), List.of(key1)).as(StepVerifier::create)// - .assertNext(changes -> { - assertThat(changes.allOk()).isTrue(); - }).verifyComplete(); - hashOperations.expire(key, org.springframework.data.redis.core.types.Expiration.seconds(60), 
FieldExpirationOptions.none(), List.of(key2)).as(StepVerifier::create)// - .assertNext(changes -> { - assertThat(changes.allOk()).isTrue(); - }).verifyComplete(); - - hashOperations.expire(key, org.springframework.data.redis.core.types.Expiration.seconds(30), FieldExpirationOptions.builder().gt().build(), List.of(key1, key2)).as(StepVerifier::create)// - .assertNext(changes -> { - assertThat(changes.ok()).containsExactly(key1); - assertThat(changes.skipped()).containsExactly(key2); - }).verifyComplete(); + hashOperations + .expire(key, org.springframework.data.redis.core.types.Expiration.seconds(20), FieldExpirationOptions.none(), + List.of(key1)) + .as(StepVerifier::create)// + .assertNext(changes -> { + assertThat(changes.allOk()).isTrue(); + }).verifyComplete(); + hashOperations + .expire(key, org.springframework.data.redis.core.types.Expiration.seconds(60), FieldExpirationOptions.none(), + List.of(key2)) + .as(StepVerifier::create)// + .assertNext(changes -> { + assertThat(changes.allOk()).isTrue(); + }).verifyComplete(); + + hashOperations + .expire(key, org.springframework.data.redis.core.types.Expiration.seconds(30), + FieldExpirationOptions.builder().gt().build(), List.of(key1, key2)) + .as(StepVerifier::create)// + .assertNext(changes -> { + assertThat(changes.ok()).containsExactly(key1); + assertThat(changes.skipped()).containsExactly(key2); + }).verifyComplete(); } - @ParameterizedRedisTest + @ParameterizedRedisTest // GH-3054 @EnabledOnCommand("HEXPIRE") void testExpireAndGetExpireSeconds() { @@ -583,10 +591,9 @@ void testExpireAndGetExpireSeconds() { assertThat(it.expirationOf(key1).raw()).isBetween(0L, 5L); assertThat(it.expirationOf(key2).raw()).isBetween(0L, 5L); }).verifyComplete(); - } - @ParameterizedRedisTest + @ParameterizedRedisTest // GH-3054 @EnabledOnCommand("HEXPIRE") void testExpireAtAndGetExpireMillis() { @@ -611,7 +618,7 @@ void testExpireAtAndGetExpireMillis() { }).verifyComplete(); } - @ParameterizedRedisTest + @ParameterizedRedisTest 
// GH-3054 @EnabledOnCommand("HEXPIRE") void testPersistAndGetExpireMillis() { @@ -638,7 +645,6 @@ void testPersistAndGetExpireMillis() { .assertNext(expirations -> { assertThat(expirations.persistent()).contains(key1, key2); }).verifyComplete(); - } @ParameterizedRedisTest // DATAREDIS-602 diff --git a/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java b/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java index 0a03b7340e..31e93d06ac 100644 --- a/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java @@ -15,8 +15,9 @@ */ package org.springframework.data.redis.support.collections; -import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.*; -import static org.assertj.core.api.Assumptions.*; +import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.assertThat; +import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assumptions.assumeThat; import java.io.IOException; import java.text.DecimalFormat; @@ -35,7 +36,6 @@ import org.assertj.core.api.Assumptions; import org.junit.jupiter.api.BeforeEach; - import org.springframework.dao.InvalidDataAccessApiUsageException; import org.springframework.data.redis.DoubleAsStringObjectFactory; import org.springframework.data.redis.LongAsStringObjectFactory; @@ -197,7 +197,7 @@ void testIncrement() { assertThat(map.increment(k1, 10)).isEqualTo(Long.valueOf(Long.valueOf((String) v1) + 10)); } - @ParameterizedRedisTest + @ParameterizedRedisTest // GH-3054 @EnabledOnCommand("HEXPIRE") void testExpire() { @@ -216,7 +216,7 @@ void testExpire() { assertThat(ops.persist()).satisfies(ExpireChanges::allOk); } - @ParameterizedRedisTest + @ParameterizedRedisTest // 
GH-3054 @EnabledOnCommand("HEXPIRE") void testExpireAt() {